code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
def _lowerCamelCase ( lowerCamelCase_: int ):
'''simple docstring'''
A : Any = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def _lowerCamelCase ( lowerCamelCase_: int = 5000 ):
'''simple docstring'''
A : Optional[int] = [(i * (3 * i - 1)) // 2 for i in range(1 , _SCREAMING_SNAKE_CASE )]
for i, pentagonal_i in enumerate(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ):
A : List[str] = pentagonal_nums[j]
A : str = pentagonal_i + pentagonal_j
A : Union[str, Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_SCREAMING_SNAKE_CASE ) and is_pentagonal(_SCREAMING_SNAKE_CASE ):
return b
return -1
if __name__ == "__main__":
print(F'''{solution() = }''') | 256 | from __future__ import annotations
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 635 | 0 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = [0]
SCREAMING_SNAKE_CASE_ = [0]
SCREAMING_SNAKE_CASE_ = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase, _lowercase, _lowercase, _lowercase ), 0 )
SCREAMING_SNAKE_CASE_ = [60]
SCREAMING_SNAKE_CASE_ = [10]
SCREAMING_SNAKE_CASE_ = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase, _lowercase, _lowercase, _lowercase ), 0 )
def a__ ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = [1, 2, 3]
SCREAMING_SNAKE_CASE_ = [3, 2, 1]
SCREAMING_SNAKE_CASE_ = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase, _lowercase, _lowercase, _lowercase ), 5 )
def a__ ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = 50
SCREAMING_SNAKE_CASE_ = [60, 100, 120]
SCREAMING_SNAKE_CASE_ = [10, 20, 30]
SCREAMING_SNAKE_CASE_ = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase, _lowercase, _lowercase, _lowercase ), 220 )
if __name__ == "__main__":
unittest.main()
| 238 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : str = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 238 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_lowerCAmelCase : Tuple = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 454 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowerCAmelCase : Any = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : Any = '''cpu'''
_lowerCAmelCase : Dict = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
_lowerCAmelCase : Union[str, Any] = '''path-to-your-trained-model'''
_lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowerCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowerCAmelCase : List[Any] = pipe.to(device)
# to channels last
_lowerCAmelCase : Any = pipe.unet.to(memory_format=torch.channels_last)
_lowerCAmelCase : int = pipe.vae.to(memory_format=torch.channels_last)
_lowerCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowerCAmelCase : Optional[int] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowerCAmelCase : Tuple = torch.randn(2, 4, 64, 64)
_lowerCAmelCase : List[str] = torch.rand(1) * 999
_lowerCAmelCase : List[Any] = torch.randn(2, 77, 768)
_lowerCAmelCase : int = (sample, timestep, encoder_hidden_status)
try:
_lowerCAmelCase : Tuple = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowerCAmelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : Dict = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : Dict = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowerCAmelCase : Tuple = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowerCAmelCase : Dict = 666
_lowerCAmelCase : Tuple = torch.Generator(device).manual_seed(seed)
_lowerCAmelCase : Tuple = {'''generator''': generator}
if args.steps is not None:
_lowerCAmelCase : Tuple = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowerCAmelCase : int = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 454 | 1 |
'''simple docstring'''
import heapq
def lowerCAmelCase( a__ : dict ):
'''simple docstring'''
lowerCamelCase__ = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(a__ , [-1 * len(a__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowerCamelCase__ = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowerCamelCase__ = heapq.heappop(a__ )[1][0]
chosen_vertices.add(a__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
lowerCamelCase__ = elem[1][1].index(a__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(a__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 426 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case_ ( A__ ):
"""simple docstring"""
__lowerCAmelCase : Optional[int] ='''xglm'''
__lowerCAmelCase : str =['''past_key_values''']
__lowerCAmelCase : Dict ={
'''num_attention_heads''': '''attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , UpperCamelCase=25_60_08 , UpperCamelCase=20_48 , UpperCamelCase=10_24 , UpperCamelCase=40_96 , UpperCamelCase=24 , UpperCamelCase=16 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.0_2 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=2 , UpperCamelCase=1 , UpperCamelCase=0 , UpperCamelCase=2 , **UpperCamelCase , ):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = d_model
lowerCamelCase__ = ffn_dim
lowerCamelCase__ = num_layers
lowerCamelCase__ = attention_heads
lowerCamelCase__ = activation_function
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = activation_dropout
lowerCamelCase__ = layerdrop
lowerCamelCase__ = init_std
lowerCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase__ = use_cache
super().__init__(
pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , decoder_start_token_id=UpperCamelCase , **UpperCamelCase , )
| 426 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
a__ : Optional[Any] = logging.getLogger()
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser()
parser.add_argument('-f' )
lowercase__ = parser.parse_args()
return args.f
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> None:
"""simple docstring"""
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , 'run_glue_deebert.py')
with patch.object(lowerCAmelCase , 'argv' , lowerCAmelCase):
lowercase__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCAmelCase , 0.6_66)
@slow
@require_torch_non_multi_gpu
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(lowerCAmelCase)
lowercase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCAmelCase)
lowercase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCAmelCase)
| 622 |
def _lowerCAmelCase ( A__ = 50_000_000 ):
lowercase__ = set()
lowercase__ = int((limit - 24) ** (1 / 2) )
lowercase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , A__ ) ) )
for primea in primes:
lowercase__ = primea * primea
for primea in primes:
lowercase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
lowercase__ = primea * primea * primea * primea
lowercase__ = square + cube + tetr
if total >= limit:
break
ret.add(A__ )
return len(A__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 622 | 1 |
'''simple docstring'''
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def A (__lowerCamelCase :Any ):
_lowerCAmelCase = botoa.client("""iam""" )
_lowerCAmelCase = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__lowerCamelCase , AssumeRolePolicyDocument=json.dumps(__lowerCamelCase , indent=2 ) )
_lowerCAmelCase = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__lowerCamelCase , PolicyName=f'{role_name}_policy_permission' , PolicyDocument=json.dumps(__lowerCamelCase , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'role {role_name} already exists. Using existing one' )
def A (__lowerCamelCase :Any ):
_lowerCAmelCase = botoa.client("""iam""" )
return iam_client.get_role(RoleName=__lowerCamelCase )["Role"]["Arn"]
def A ():
_lowerCAmelCase = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __lowerCamelCase , )
_lowerCAmelCase = None
if credentials_configuration == 0:
_lowerCAmelCase = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
_lowerCAmelCase = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
_lowerCAmelCase = _ask_field("""AWS Access Key ID: """ )
_lowerCAmelCase = aws_access_key_id
_lowerCAmelCase = _ask_field("""AWS Secret Access Key: """ )
_lowerCAmelCase = aws_secret_access_key
_lowerCAmelCase = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
_lowerCAmelCase = aws_region
_lowerCAmelCase = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __lowerCamelCase , )
if role_management == 0:
_lowerCAmelCase = _ask_field("""Enter your IAM role name: """ )
else:
_lowerCAmelCase = """accelerate_sagemaker_execution_role"""
print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
_create_iam_role_for_sagemaker(__lowerCamelCase )
_lowerCAmelCase = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="""Please enter yes or no.""" , )
_lowerCAmelCase = None
if is_custom_docker_image:
_lowerCAmelCase = _ask_field("""Enter your Docker image: """ , lambda __lowerCamelCase : str(__lowerCamelCase ).lower() )
_lowerCAmelCase = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="""Please enter yes or no.""" , )
_lowerCAmelCase = None
if is_sagemaker_inputs_enabled:
_lowerCAmelCase = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __lowerCamelCase : str(__lowerCamelCase ).lower() , )
_lowerCAmelCase = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="""Please enter yes or no.""" , )
_lowerCAmelCase = None
if is_sagemaker_metrics_enabled:
_lowerCAmelCase = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __lowerCamelCase : str(__lowerCamelCase ).lower() , )
_lowerCAmelCase = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
_lowerCAmelCase = {}
_lowerCAmelCase = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="""Please enter yes or no.""" , )
if use_dynamo:
_lowerCAmelCase = """dynamo_"""
_lowerCAmelCase = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
_lowerCAmelCase = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="""Please enter yes or no.""" , )
if use_custom_options:
_lowerCAmelCase = _ask_options(
"""Which mode do you want to use?""" , __lowerCamelCase , lambda __lowerCamelCase : TORCH_DYNAMO_MODES[int(__lowerCamelCase )] , default="""default""" , )
_lowerCAmelCase = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="""Please enter yes or no.""" , )
_lowerCAmelCase = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__lowerCamelCase , error_message="""Please enter yes or no.""" , )
_lowerCAmelCase = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
_lowerCAmelCase = _ask_options(
__lowerCamelCase , __lowerCamelCase , lambda __lowerCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__lowerCamelCase )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
_lowerCAmelCase = _ask_field(__lowerCamelCase , lambda __lowerCamelCase : str(__lowerCamelCase ).lower() , default="""ml.p3.2xlarge""" )
_lowerCAmelCase = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
_lowerCAmelCase = _ask_field(
"""How many machines do you want use? [1]: """ , __lowerCamelCase , default=1 , )
_lowerCAmelCase = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=__lowerCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__lowerCamelCase , use_cpu=__lowerCamelCase , dynamo_config=__lowerCamelCase , eca_instance_type=__lowerCamelCase , profile=__lowerCamelCase , region=__lowerCamelCase , iam_role_name=__lowerCamelCase , mixed_precision=__lowerCamelCase , num_machines=__lowerCamelCase , sagemaker_inputs_file=__lowerCamelCase , sagemaker_metrics_file=__lowerCamelCase , )
| 709 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def A (__lowerCamelCase :Any ):
_lowerCAmelCase = np.max(__lowerCamelCase , axis=-1 , keepdims=__lowerCamelCase )
_lowerCAmelCase = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowerCamelCase )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {}
if "second_text" in kwargs:
_lowerCAmelCase = kwargs["""second_text"""]
return preprocess_kwargs, {}, {}
def _lowercase ( self , _lowercase , _lowercase=None ):
"""simple docstring"""
return self.tokenizer(_lowercase , text_pair=_lowercase , return_tensors=self.framework )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
return self.model(**_lowercase )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = model_outputs.logits[0].numpy()
_lowerCAmelCase = softmax(_lowercase )
_lowerCAmelCase = np.argmax(_lowercase )
_lowerCAmelCase = self.model.config.idalabel[best_class]
_lowerCAmelCase = probabilities[best_class].item()
_lowerCAmelCase = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 162 | 0 |
"""simple docstring"""
_A = """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowercase_ ( __UpperCAmelCase ) -> int:
lowerCAmelCase__ : Dict = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
lowerCAmelCase__ : Stack[int] = Stack()
lowerCAmelCase__ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__UpperCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__UpperCAmelCase )
elif i == ")":
# RULE 4
lowerCAmelCase__ : Optional[Any] = operator_stack.peek()
operator_stack.pop()
lowerCAmelCase__ : Optional[Any] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase__ : Any = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase__ : List[Any] = operators[opr](__UpperCAmelCase , __UpperCAmelCase )
operand_stack.push(__UpperCAmelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_A = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 299 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_A = logging.get_logger(__name__)
class _lowerCamelCase ( a_ ):
def __init__( self : str , *UpperCamelCase : int , **UpperCamelCase : str ) -> None:
"""simple docstring"""
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 299 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class A ( __snake_case ):
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : int = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : str = self._create_example_records()
A : int = Dataset.from_list(SCREAMING_SNAKE_CASE )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(SCREAMING_SNAKE_CASE ):
self.assertDictEqual(SCREAMING_SNAKE_CASE , example_records[i] )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Union[str, Any] = self._create_example_records()
A : Optional[int] = Dataset.from_list(SCREAMING_SNAKE_CASE )
A : Dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def __lowerCAmelCase ( self ) -> Tuple: # checks what happens with missing columns
"""simple docstring"""
A : Dict = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
A : Optional[Any] = Dataset.from_list(SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def __lowerCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record
"""simple docstring"""
A : Optional[Any] = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
A : Dict = Dataset.from_list(SCREAMING_SNAKE_CASE )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : str = Dataset.from_list([] )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 343 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class A ( unittest.TestCase ):
__magic_name__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__magic_name__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__magic_name__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__magic_name__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : Any = ZeroShotClassificationPipeline(
model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : Optional[int] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
# No kwarg
A : Dict = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
A : str = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
A : str = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
A : Optional[int] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
A : Any = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
# https://github.com/huggingface/transformers/issues/13846
A : List[str] = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]}
for i in range(1 )
] , )
A : Dict = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]}
for i in range(2 )
] , )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier(SCREAMING_SNAKE_CASE , candidate_labels='''politics''' )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=SCREAMING_SNAKE_CASE )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=SCREAMING_SNAKE_CASE , )
self.run_entailment_id(SCREAMING_SNAKE_CASE )
def run_entailment_id(self, zero_shot_classifier):
    """Verify ``entailment_id`` tracks the model's label mapping.

    Rewrites ``config.labelaid`` with several label layouts, checks the
    resolved entailment index for each, then restores the original mapping
    so later tests are unaffected.  (Restored: the obfuscated version bound
    every mapping to a dead local instead of ``config.labelaid``, and the
    method name is the one the caller above actually invokes.)
    """
    config = zero_shot_classifier.model.config
    original_labelaid = config.labelaid
    original_entailment = zero_shot_classifier.entailment_id

    # No label resembles an entailment label -> sentinel -1.
    config.labelaid = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
    self.assertEqual(zero_shot_classifier.entailment_id, -1)
    config.labelaid = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
    self.assertEqual(zero_shot_classifier.entailment_id, 0)
    config.labelaid = {'ENTAIL': 0, 'NON-ENTAIL': 1}
    self.assertEqual(zero_shot_classifier.entailment_id, 0)
    config.labelaid = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
    self.assertEqual(zero_shot_classifier.entailment_id, 2)

    # Restore the original mapping and confirm the id resolves back.
    config.labelaid = original_labelaid
    self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
def test_truncation(self):
    """Regression test: very long inputs must not crash the pipeline.

    (Restored: the pipeline was bound to a dead local while the call used an
    undefined name; method renamed to a unique ``test_*`` name so unittest
    actually runs it.)
    """
    zero_shot_classifier = pipeline(
        'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt',
    )
    # There was a regression in 4.10 for this
    # Adding a test so we don't make the mistake again.
    # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
    zero_shot_classifier(
        'Who are you voting for in 2020?' * 100, candidate_labels=['politics', 'public health', 'science']
    )
@require_torch
def test_small_model_pt(self):
    """Tiny random PyTorch model: scores must be (near) uniform.

    (Restored lost bindings: classifier and its output were both assigned to
    a dead local; the assertion read an undefined name.)
    """
    zero_shot_classifier = pipeline(
        'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt',
    )
    outputs = zero_shot_classifier(
        'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science']
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            'sequence': 'Who are you voting for in 2020?',
            'labels': ['science', 'public health', 'politics'],
            'scores': [0.333, 0.333, 0.333],
        },
    )
@require_tf
def test_small_model_tf(self):
    """Tiny random TensorFlow model: scores must be (near) uniform.

    (Restored lost bindings, same defect pattern as the PyTorch variant.)
    """
    zero_shot_classifier = pipeline(
        'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf',
    )
    outputs = zero_shot_classifier(
        'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science']
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            'sequence': 'Who are you voting for in 2020?',
            'labels': ['science', 'public health', 'politics'],
            'scores': [0.333, 0.333, 0.333],
        },
    )
@slow
@require_torch
def test_large_model_pt(self):
    """Slow integration check against roberta-large-mnli (PyTorch backend).

    (Restored lost bindings; ``multi_label``'s literal was lost in the
    obfuscation -- True matches the independent, non-normalized scores
    asserted below.)
    """
    zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
    outputs = zero_shot_classifier(
        'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science']
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            'sequence': 'Who are you voting for in 2020?',
            'labels': ['politics', 'public health', 'science'],
            'scores': [0.976, 0.015, 0.009],
        },
    )
    outputs = zero_shot_classifier(
        'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
        ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
        ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
        ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
        ' machine translation tasks show these models to be superior in quality while being more parallelizable'
        ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
        ' English-to-German translation task, improving over the existing best results, including ensembles by'
        ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
        ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
        ' fraction of the training costs of the best models from the literature. We show that the Transformer'
        ' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
        ' large and limited training data.',
        candidate_labels=['machine learning', 'statistics', 'translation', 'vision'],
        multi_label=True,
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            'sequence': (
                'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
                ' networks in an encoder-decoder configuration. The best performing models also connect the'
                ' encoder and decoder through an attention mechanism. We propose a new simple network'
                ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
                ' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
                ' superior in quality while being more parallelizable and requiring significantly less time to'
                ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
                ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
                ' English-to-French translation task, our model establishes a new single-model state-of-the-art'
                ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
                ' costs of the best models from the literature. We show that the Transformer generalizes well to'
                ' other tasks by applying it successfully to English constituency parsing both with large and'
                ' limited training data.'
            ),
            'labels': ['translation', 'machine learning', 'vision', 'statistics'],
            'scores': [0.817, 0.713, 0.018, 0.018],
        },
    )
@slow
@require_tf
def test_large_model_tf(self):
    """Slow integration check against roberta-large-mnli (TensorFlow backend).

    (Restored lost bindings; ``multi_label=True`` matches the asserted
    independent scores, mirroring the PyTorch variant.)
    """
    zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
    outputs = zero_shot_classifier(
        'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science']
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            'sequence': 'Who are you voting for in 2020?',
            'labels': ['politics', 'public health', 'science'],
            'scores': [0.976, 0.015, 0.009],
        },
    )
    outputs = zero_shot_classifier(
        'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
        ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
        ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
        ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
        ' machine translation tasks show these models to be superior in quality while being more parallelizable'
        ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
        ' English-to-German translation task, improving over the existing best results, including ensembles by'
        ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
        ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
        ' fraction of the training costs of the best models from the literature. We show that the Transformer'
        ' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
        ' large and limited training data.',
        candidate_labels=['machine learning', 'statistics', 'translation', 'vision'],
        multi_label=True,
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            'sequence': (
                'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
                ' networks in an encoder-decoder configuration. The best performing models also connect the'
                ' encoder and decoder through an attention mechanism. We propose a new simple network'
                ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
                ' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
                ' superior in quality while being more parallelizable and requiring significantly less time to'
                ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
                ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
                ' English-to-French translation task, our model establishes a new single-model state-of-the-art'
                ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
                ' costs of the best models from the literature. We show that the Transformer generalizes well to'
                ' other tasks by applying it successfully to English constituency parsing both with large and'
                ' limited training data.'
            ),
            'labels': ['translation', 'machine learning', 'vision', 'statistics'],
            'scores': [0.817, 0.713, 0.018, 0.018],
        },
    )
| 343 | 1 |
'''simple docstring'''
def UpperCamelCase_(A__: list[int]) -> list[int]:
    """Sort ``A__`` in place with exchange sort and return it.

    Pass ``i`` swaps the element at ``i`` with any later, smaller element,
    so it leaves the i-th smallest value at index ``i``.  O(n^2) comparisons.

    (Fixed: the previous body iterated ``range(A__)`` over the list itself,
    read an undefined ``numbers`` name, and discarded the swap result.)
    """
    n = len(A__)
    for i in range(n):
        for j in range(i + 1, n):
            if A__[j] < A__[i]:
                A__[i], A__[j] = A__[j], A__[i]
    return A__
if __name__ == "__main__":
    # Read comma-separated integers from stdin, sort them, print the result.
    # (Fixed NameErrors: the input/list bindings were lost and the sorter was
    # called by a name that does not exist in this file.)
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(UpperCamelCase_(unsorted))
| 275 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): both assignments below bind the same name ``__A`` -- the
# logger is immediately clobbered by the archive map; verify the intended
# distinct names (logger / pretrained config-archive map).
__A : str = logging.get_logger(__name__)
# Checkpoint name -> URL of its hosted config.json.
__A : int = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class __snake_case(PretrainedConfig):
    """Configuration class for RoCBert (``model_type = "roc_bert"``).

    Holds the usual BERT-style encoder hyper-parameters plus the
    pronunciation/shape embedding options specific to RoCBert.

    NOTE(review): reconstructed -- the previous ``__init__`` reused one name
    for every parameter (a SyntaxError) and bound every value to a dead
    local, so no attribute was ever set; the base-class name was mangled.
    Parameter names/defaults follow the attribute reads in the original body.
    """

    # model_type identifier used by the auto classes.
    lowercase = 'roc_bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: auxiliary pronunciation / glyph-shape embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
from __future__ import annotations
def SCREAMING_SNAKE_CASE(lowerCAmelCase):
    """Return a peak element of ``lowerCAmelCase`` by divide and conquer.

    A peak is strictly greater than both neighbours.  Looks at the middle
    three elements; if the middle one is not a peak, recurses into the
    rising side.  O(log n).

    (Fixed: the body read ``lst``/``m``/``three`` and recursed via ``peak``,
    none of which exist in this file.)
    """
    m = len(lowerCAmelCase) // 2
    # The middle three elements.
    three = lowerCAmelCase[m - 1 : m + 2]
    if three[1] > three[0] and three[1] > three[2]:
        # Middle element is a peak.
        return three[1]
    elif three[0] < three[2]:
        # Increasing towards the right -> a peak lies on the right half.
        if len(lowerCAmelCase[:m]) == 2:
            m -= 1
        return SCREAMING_SNAKE_CASE(lowerCAmelCase[m:])
    else:
        # Decreasing -> a peak lies on the left half.
        if len(lowerCAmelCase[:m]) == 2:
            m += 1
        return SCREAMING_SNAKE_CASE(lowerCAmelCase[:m])
if __name__ == "__main__":
    # Run doctests when executed as a script (none are defined above, so this
    # is currently a no-op kept as scaffolding).
    import doctest

    doctest.testmod()
| 720 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
lowercase : Union[str, Any] = None
# Module logger (the class body below calls ``logger.error``).
logger = logging.get_logger(__name__)

# File names the tokenizer looks for next to a checkpoint.
# NOTE(review): these module constants were all bound to one name
# (``lowercase``) while the class below reads the canonical names; restored.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

# Checkpoint name -> URL of each hosted vocabulary resource.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
class __A(PreTrainedTokenizerFast):
    """Fast BigBird tokenizer (SentencePiece unigram, Rust *tokenizers* backend).

    NOTE(review): reconstructed -- the obfuscated version gave every method
    one shared name (so only the last definition survived), duplicated
    parameter names (a SyntaxError) and mangled the base class.  Method names
    follow the ``PreTrainedTokenizerFast`` override contract implied by the
    visible ``super().__init__`` call and the base-class usage.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        """Wrap plain-string special tokens and delegate to the fast base class."""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Saving the slow vocabulary needs the original spiece.model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 marking special tokens, 0 marking sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.'
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (plus specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the SentencePiece model file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 105 | 0 |
def __UpperCAmelCase ( UpperCAmelCase )-> int:
"""simple docstring"""
lowercase = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
lowercase = hex_num[0] == '''-'''
if is_negative:
lowercase = hex_num[1:]
try:
lowercase = int(UpperCAmelCase, 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
lowercase = ''''''
while int_num > 0:
lowercase = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
    # Run doctests when executed as a script (none are defined above, so this
    # is currently a no-op kept as scaffolding).
    import doctest

    doctest.testmod()
| 604 | def __UpperCAmelCase ( UpperCAmelCase = 50 )-> int:
"""simple docstring"""
lowercase = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2, 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
    # Print the Project Euler 116 result.  (Fixed: the former ``solution``
    # alias does not exist in this file -- the function is ``__UpperCAmelCase``.)
    print(F'''{__UpperCAmelCase() = }''')
| 604 | 1 |
"""simple docstring"""
from __future__ import annotations
def UpperCamelCase(_lowerCAmelCase: list[int | str]) -> None:
    """Print every permutation of ``_lowerCAmelCase``.

    Driver for the backtracking helper below.  (Fixed: both functions were
    given the same name, so the recursive helper shadowed this wrapper and
    the wrapper's call target never existed; the helper is now private.)
    """
    _create_state_space_tree(_lowerCAmelCase, [], 0, [0 for _ in range(len(_lowerCAmelCase))])


def _create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Depth-first backtracking: extend ``current_sequence`` with each unused
    element of ``sequence``; print when a full permutation has been built."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            _create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # Undo the choice before trying the next candidate.
            current_sequence.pop()
            index_used[i] = False
# Demo run at import time: print permutations of a numeric and a string
# sequence.  (Restored: both sequences were bound to ``__A`` and the call
# target ``generate_all_permutations`` never existed -- the driver in this
# file is ``UpperCamelCase``.)
sequence = [3, 1, 2, 4]
UpperCamelCase(sequence)

sequence_a = ["A", "B", "C"]
UpperCamelCase(sequence_a)
| 721 | """simple docstring"""
def UpperCamelCase ( ):
return 1
def UpperCamelCase ( _lowerCAmelCase : int ):
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def UpperCamelCase ( _lowerCAmelCase : int ):
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : int ):
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : int ):
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : int ):
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : int ):
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : int ):
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : int = 200 ):
return two_pound(_lowerCAmelCase )
if __name__ == "__main__":
    # Read a target amount in pence from stdin and print the number of coin
    # combinations that make it.
    # NOTE(review): ``solution`` is not defined under that name above (every
    # function in the chain is named ``UpperCamelCase``) -- verify before use.
    print(solution(int(input().strip())))
| 173 | 0 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase ( a = "laptop" ) -> DataFrame:
    '''simple docstring'''
    # Scrape the first Amazon.in search-results page for the keyword ``a`` and
    # collect title / link / price / rating / MRP / discount-percent rows into
    # a pandas DataFrame, which is returned.
    #
    # NOTE(review): this body was mechanically obfuscated -- every binding was
    # renamed to ``__magic_name__`` while later lines still read the original
    # names (``product``, ``soup``, ``product_title``, ``data_frame`` ...), so
    # as written it raises NameError at runtime.  The lost bindings (and the
    # two trailing ``' '`` resets) need restoring before this can run.
    __magic_name__ = F'''https://www.amazon.in/laptop/s?k={product}'''  # search URL; presumably should interpolate ``a`` -- TODO confirm
    __magic_name__ = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    # NOTE(review): both arguments receive ``a`` here; the URL and header dict
    # built above are never passed -- verify intended call.
    __magic_name__ = BeautifulSoup(requests.get(a , headers=a ).text )
    # Initialize a Pandas dataframe with the column titles
    __magic_name__ = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
        try:
            # ``item.ha`` looks like a mangled ``item.h2`` -- TODO confirm.
            __magic_name__ = item.ha.text
            __magic_name__ = '''https://www.amazon.in/''' + item.ha.a['''href''']
            __magic_name__ = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
            try:
                __magic_name__ = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
            except AttributeError:
                # Rating element missing on the card.
                __magic_name__ = '''Not available'''
            try:
                __magic_name__ = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
                )
            except AttributeError:
                # No strike-through MRP shown.
                __magic_name__ = ''''''
            try:
                # Discount percent = (MRP - current price) / MRP * 100.
                __magic_name__ = float(
                    (
                        (
                            float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                            - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
                        )
                        / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                    )
                    * 100 )
            except ValueError:
                __magic_name__ = float('''nan''' )
        except AttributeError:
            # Card without the expected structure: silently skipped.
            pass
        __magic_name__ = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        __magic_name__ = ''' '''
        __magic_name__ = ''' '''
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # Demo: scrape data for "headphones" and dump it to a CSV file.
    # NOTE(review): ``get_amazon_product_data`` and ``product`` are not
    # defined under those names above (the function is ``UpperCamelCase`` and
    # the keyword is bound to ``_lowerCAmelCase``) -- verify before running.
    _lowerCAmelCase = "headphones"
    get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
| 432 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowerCAmelCase = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Build the kwargs dict for a Blenderbot forward pass, deriving any
    missing masks from ``config.pad_token_id``.

    (Restored: the obfuscated signature reused one parameter name eight
    times -- a SyntaxError -- while the body read the canonical names; the
    function name matches the call site in the model tester below.)

    NOTE: mirroring the upstream test helper, the returned
    "decoder_attention_mask" entry is the *encoder* attention mask; the
    computed decoder mask and the head masks are currently unused.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    """Builds a tiny BlenderbotConfig plus random encoder/decoder inputs and
    hosts the shared cached-decoding checks.

    NOTE(review): reconstructed -- the obfuscated version duplicated every
    parameter name (a SyntaxError) and gave all methods one shared name.
    The class and method names follow the call sites visible in the test
    class below (``FlaxBlenderbotModelTester(self)``,
    ``check_use_cache_forward`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Random eos-terminated input ids, shifted decoder ids, tiny config."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        # Append the eos token (id 2) to every row.
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=True,  # literal lost in obfuscation -- TODO confirm against upstream tester
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        """Alias used by the common Flax test mixins."""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Incremental (cached) decoding must match a full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='i4')

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        # Decode everything but the last token with a fresh cache ...
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        # ... then feed just the last token against the filled cache.
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above, but with an explicit (padded) decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )

        # Pad the mask out to the cache length with zeros.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
@require_flax
class _SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Standalone LM-head shape tests for Flax Blenderbot.

    NOTE(review): reconstructed -- all methods shared one name (so only the
    last survived) and locals were bound to dead names; the attribute name
    ``vocab_size`` is grounded by the ``self.vocab_size`` read in the body.
    A later class in this file reuses this class's name -- verify.
    """

    vocab_size = 99

    def _get_config_and_data(self):
        """13 eos-terminated rows (one padded) plus a tiny config."""
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        """Logits must have shape (batch, seq_len, vocab_size)."""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_lm_uneven_forward(self):
        """Encoder and decoder lengths may differ; logits follow the decoder."""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_shift_tokens_right(self):
        """Shifting moves one pad token out and puts decoder-start (2) first."""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float64).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float64).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class _SCREAMING_SNAKE_CASE ( __a ,unittest.TestCase ,__a ):
__SCREAMING_SNAKE_CASE :List[str] = True
__SCREAMING_SNAKE_CASE :Union[str, Any] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE :List[Any] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def setUp(self):
    """Attach the shared model tester (restored unittest hook name; the
    tester was previously bound to a dead local instead of
    ``self.model_tester``, which the sibling tests read)."""
    self.model_tester = FlaxBlenderbotModelTester(self)
def snake_case__ ( self : List[str] ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(a__ , a__ , a__ )
def snake_case__ ( self : str ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(a__ , a__ , a__ )
def snake_case__ ( self : Any ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__magic_name__ = self._prepare_for_class(a__ , a__ )
__magic_name__ = model_class(a__ )
@jax.jit
def encode_jitted(a__ : Union[str, Any] , a__ : Tuple=None , **a__ : Tuple ):
return model.encode(input_ids=a__ , attention_mask=a__ )
with self.subTest('''JIT Enabled''' ):
__magic_name__ = encode_jitted(**a__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__magic_name__ = encode_jitted(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) )
for jitted_output, output in zip(a__ , a__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self : Any ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__magic_name__ = model_class(a__ )
__magic_name__ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__magic_name__ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(a__ : Union[str, Any] , a__ : List[Any] , a__ : Dict ):
return model.decode(
decoder_input_ids=a__ , decoder_attention_mask=a__ , encoder_outputs=a__ , )
with self.subTest('''JIT Enabled''' ):
__magic_name__ = decode_jitted(**a__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__magic_name__ = decode_jitted(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) )
for jitted_output, output in zip(a__ , a__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case__ ( self : Optional[Any] ):
for model_class_name in self.all_model_classes:
__magic_name__ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__magic_name__ = np.ones((1, 1) ) * model.config.eos_token_id
__magic_name__ = model(a__ )
self.assertIsNotNone(a__ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def snake_case__ ( self : List[Any] ):
__magic_name__ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
__magic_name__ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
__magic_name__ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=a__ )
__magic_name__ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
__magic_name__ = ['''Sam''']
__magic_name__ = tokenizer(a__ , return_tensors='''jax''' )
__magic_name__ = model.generate(**a__ , **a__ )
__magic_name__ = '''Sam is a great name. It means "sun" in Gaelic.'''
__magic_name__ = tokenizer.batch_decode(a__ , **a__ )
assert generated_txt[0].strip() == tgt_text
| 432 | 1 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from ``state_dict`` in place.

    The scrambled original was named ``__lowercase`` while the caller below
    invokes ``remove_ignore_keys_``; the callable name is restored here.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        # Default avoids KeyError when a checkpoint lacks one of the keys.
        state_dict.pop(k, None)


# Backward-compatible alias for the scrambled original name.
__lowercase = remove_ignore_keys_
def make_linear_from_emb(emb):
    """Return a bias-free ``nn.Linear`` whose weights are tied to ``emb``.

    The returned layer shares ``emb.weight.data``, so it acts as the LM head
    tied to the token embedding.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


# Backward-compatible alias for the scrambled original name.
__lowercase = make_linear_from_emb
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Load a fairseq mBART checkpoint and return an ``MBartForConditionalGeneration``.

    The scrambled original had four parameters all named ``snake_case`` (a
    SyntaxError) and was named ``__lowercase`` while the CLI below calls
    ``convert_fairseq_mbart_checkpoint_from_disk``; both are restored.

    Args:
        checkpoint_path: path to the fairseq ``model.pt`` file.
        hf_config_path: HF config repo to base the converted model on.
        finetuned: whether the checkpoint is fine-tuned (ties the LM head).
        mbart_50: whether the checkpoint is an mBART-50 checkpoint.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


# Backward-compatible alias for the scrambled original name.
__lowercase = convert_fairseq_mbart_checkpoint_from_disk
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    # NOTE(review): the flag is ``--mbart_50`` (dest ``mbart_50``) — the
    # scrambled original read the non-existent ``args.mbart_aa``.
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 700 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Compute the resonant frequency of an LC circuit: f0 = 1 / (2*pi*sqrt(L*C)).

    The scrambled original declared two parameters with the same name (a
    SyntaxError) while the body read ``inductance``/``capacitance``; the
    signature is restored to match the body.

    Args:
        inductance: inductance L in henries, must be > 0.
        capacitance: capacitance C in farads, must be > 0.

    Returns:
        A ``("Resonant frequency", value)`` pair.

    Raises:
        ValueError: if either argument is zero or negative.
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


# Backward-compatible alias for the scrambled original name.
__lowercase = resonant_frequency

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 180 | 0 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 19 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
UpperCAmelCase__ = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main() -> None:
    """Stale-bot for huggingface/diffusers: close, reopen, or flag inactive issues.

    The scrambled original was named ``_UpperCAmelCase`` while the entry-point
    guard below calls ``main()``; the name is restored (an alias keeps the old
    name bound).
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Most recent comment first.  (The scrambled original's lambda read an
        # undefined name; the parameter is restored.)
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


# Backward-compatible alias for the scrambled original name.
_UpperCAmelCase = main

if __name__ == "__main__":
    main()
| 224 | 0 |
'''simple docstring'''
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime.

    Uses the 6k +/- 1 optimization after handling 2, 3, negatives, 0 and 1.
    Restored to the name ``is_prime`` that the sibling generator calls.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# Backward-compatible alias for the scrambled original name.
snake_case_ = is_prime
def prime_generator():
    """Yield primes indefinitely, in increasing order (2, 3, 5, 7, ...)."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


# Backward-compatible alias for the scrambled original name.
snake_case_ = prime_generator
def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


# Backward-compatible alias for the scrambled original name.
snake_case_ = solution
if __name__ == "__main__":
print(F"""{solution() = }""")
| 163 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Beam dataset with a single string feature, used by the tests below.

    The scrambled original was named ``SCREAMING_SNAKE_CASE`` while the tests
    instantiate ``DummyBeamDataset``; the builder-hook method names (which the
    ``datasets`` framework calls) are restored as well.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


# Backward-compatible alias for the scrambled original name.
SCREAMING_SNAKE_CASE = DummyBeamDataset
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam dataset with a nested ``Sequence`` feature, used by the tests below.

    Restored from the scrambled ``SCREAMING_SNAKE_CASE`` name that shadowed the
    dummy builder; the builder-hook method names are restored as well.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


# Backward-compatible alias for the scrambled original name.
SCREAMING_SNAKE_CASE = NestedBeamDataset
def get_test_dummy_examples():
    """Return three (index, example) pairs consumed by ``DummyBeamDataset``."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


# Backward-compatible alias for the scrambled original name.
snake_case_ = get_test_dummy_examples
def get_test_nested_examples():
    """Return three (index, example) pairs consumed by ``NestedBeamDataset``."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


# Backward-compatible alias for the scrambled original name.
snake_case_ = get_test_nested_examples
class SCREAMING_SNAKE_CASE(TestCase):
    """End-to-end tests for Beam-based dataset builders, run on the DirectRunner."""

    # NOTE(review): the original base-class name did not resolve; ``TestCase``
    # (imported above) is the base that provides the ``assert*`` helpers used
    # here.  Test-method names are restored so unittest actually discovers them.

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two shards so the sharded file layout is exercised.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # NOTE(review): the original checked shard 00000 twice; the second
            # assertion is restored to check shard 00001.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 163 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Mutable scheduler state carried through the Karras-VE sampling loop.

    Restored from the scrambled ``a_`` name (the scheduler below constructs it
    as ``KarrasVeSchedulerState``); the three fields had been collapsed onto a
    single duplicated attribute name.
    """

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


# Backward-compatible alias for the scrambled original name.
a_ = KarrasVeSchedulerState
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a Karras-VE scheduler step.

    Restored from the scrambled ``a_`` name (the scheduler below constructs it
    as ``FlaxKarrasVeOutput``); the base class is ``BaseOutput`` imported above.

    Attributes:
        prev_sample: the sample for the next denoising step.
        derivative: the derivative used by the correction step.
        state: the (unchanged) scheduler state.
    """

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


# Backward-compatible alias for the scrambled original name.
a_ = FlaxKarrasVeOutput
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax implementation of the Karras et al. variance-expanding (VE) scheduler.

    NOTE(review): the scrambled original declared ``class a_(a, a)`` — a
    duplicate, undefined base; the mixins imported above are the only bases
    that provide ``self.config`` / scheduler plumbing used in the methods.
    """

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # All parameters are stored on ``self.config`` by @register_to_config.
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()):
        """Return a new state with the timestep/sigma schedule filled in."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ):
        """Explode the sample to a higher noise level sigma_hat; returns (sample_hat, sigma_hat)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ):
        """First-order (Euler) step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ):
        """Second-order (Heun) correction averaging the two derivatives."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()


# Backward-compatible alias for the scrambled original name.
a_ = FlaxKarrasVeScheduler
| 598 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the set of already-emitted deprecation warnings so each test re-triggers them."""
    # NOTE(review): the scrambled parameter name did not match the ``monkeypatch``
    # read in the body; pytest's builtin fixture name is restored.
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace the huggingface_hub client used by ``datasets.inspect`` with a stub."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each deprecated metric entry point must emit the evaluate-migration warning.

    NOTE(review): the scrambled original declared five parameters with the same
    name (a SyntaxError); the parametrize arguments plus the two fixtures above
    and pytest's ``tmp_path`` are the only set consistent with the body.
    """
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 598 | 1 |
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
# All paths are validated relative to the repo root (run from the repo root).
REPO_PATH = "."

if __name__ == "__main__":
    # NOTE(review): every module variable in the scrambled original shared one
    # name while the reads used the real names; the real names are restored and
    # the trailing table-junk token that broke the final line was removed.
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the (old_key, new_key) pairs mapping a DiT/BEiT checkpoint onto HF names.

    The scrambled original declared three parameters with the same name (a
    SyntaxError) and was called ``create_rename_keys`` below; both are restored.

    Args:
        config: model config; only ``num_hidden_layers`` is read here.
        has_lm_head: checkpoint has a masked-image-modeling head (vs. classifier).
        is_semantic: checkpoint keys are prefixed with ``backbone.``.
    """
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# Backward-compatible alias for the scrambled original name.
A_ = create_rename_keys
def read_in_q_k_v(config, state_dict, has_lm_head=False, is_semantic=False):
    """Split each fused qkv projection in ``state_dict`` into HF query/key/value keys (in place).

    NOTE(review): the scrambled original popped the fused tensors but assigned
    the slices to a dead local instead of back into ``state_dict``; the
    ``state_dict[...] = ...`` writes are restored (BEiT layout: no key bias,
    gamma_1/gamma_2 become lambda_1/lambda_2 so ``from_pretrained`` keeps them).
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


# Backward-compatible alias for the scrambled original name.
A_ = read_in_q_k_v
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    NOTE(review): the scrambled original popped the value into a dead local
    without writing it back; the ``dct[new] = val`` store is restored.
    """
    val = dct.pop(old)
    dct[new] = val


# Backward-compatible alias for the scrambled original name.
A_ = rename_key
def prepare_img():
    """Download the standard COCO cats image used to sanity-check model outputs."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


# Backward-compatible alias for the scrambled original name.
A_ = prepare_img
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a DiT checkpoint from ``checkpoint_url`` into the HF BEiT format.

    The scrambled original declared duplicate parameter names (a SyntaxError)
    and was named ``A_`` while the CLI below calls ``convert_dit_checkpoint``;
    both are restored.

    Args:
        checkpoint_url: URL of the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: where to save the converted model/processor.
        push_to_hub: also push the converted artifacts to the Hub.
    """
    # rvlcdip checkpoints are fine-tuned classifiers; the rest are MIM heads.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(config, state_dict, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


# Backward-compatible alias for the scrambled original name.
A_ = convert_dit_checkpoint
if __name__ == "__main__":
    # NOTE(review): module variables restored from the scrambled shared name,
    # and the trailing table-junk token that broke the final line was removed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the candidate plaintext for all 26 keys.

    NOTE(review): the scrambled original gave this function and ``main`` the
    same name, and ``main`` called the non-existent ``decrypt``; the names are
    restored so the call resolves.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    # Wrap around the alphabet.
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                # Non-letters (spaces, punctuation) pass through unchanged.
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    """Read an encrypted message from stdin and print every candidate decryption."""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


# Backward-compatible alias: the last binding of the scrambled name was main.
SCREAMING_SNAKE_CASE__ = main

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 586 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    """Configuration for the TimeSformer video transformer.

    NOTE(review): the scrambled original inherited from an undefined name and
    declared every ``__init__`` parameter with the same identifier (a
    SyntaxError); ``PretrainedConfig`` (imported above) and the parameter names
    read by the body are restored.
    """

    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate


# Backward-compatible alias for the scrambled original name.
UpperCAmelCase = TimesformerConfig
| 586 | 1 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase_ : int = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute a (height, width) for DPT resizing, constrained to a multiple.

    The scrambled original declared duplicate parameter names in both the outer
    and inner function (a SyntaxError); the names read by the bodies are
    restored.

    Args:
        input_image: image whose current size is read via ``get_image_size``.
        output_size: target size; an int is treated as (size, size).
        keep_aspect_ratio: scale both axes by the single scale closest to 1.
        multiple: each returned dimension is rounded to a multiple of this.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            # Rounding up overshot the cap — round down instead.
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


# Backward-compatible alias for the scrambled original name.
__A = get_resize_output_image_size
class __lowerCAmelCase ( BaseImageProcessor ):
    """DPT-style image processor.

    Optionally resizes images (with aspect-ratio preservation and a
    size-multiple constraint), rescales pixel values and normalizes them,
    returning a `BatchFeature` with a ``pixel_values`` entry.

    NOTE: The original (mangled) source was not valid Python — every method
    signature repeated the same parameter name, all five methods were named
    ``snake_case_`` (so they shadowed one another while ``preprocess`` called
    ``self.resize`` / ``self.rescale`` / ``self.normalize``), and the base
    class ``__a`` was undefined even though ``BaseImageProcessor`` is imported
    at the top of the file. The intended names are restored here from the
    internal keyword call sites.
    """

    # Keys this processor produces for the model's forward pass.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_5_5,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """Store default preprocessing settings; ``size`` defaults to 384x384."""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 3_8_4, "width": 3_8_4}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (a dict with ``height``/``width`` keys),
        optionally preserving aspect ratio and rounding each dimension to a
        multiple of ``ensure_multiple_of``.

        Raises:
            ValueError: If ``size`` lacks the ``height`` or ``width`` key.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        keep_aspect_ratio: Optional[bool] = None,
        ensure_multiple_of: Optional[int] = None,
        resample: Optional["PILImageResampling"] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply resize → rescale → normalize to one image or a batch.

        Per-call arguments override the defaults stored at construction time.

        Returns:
            `BatchFeature` with a ``pixel_values`` entry in ``data_format``
            channel ordering, optionally converted to ``return_tensors``.
        """
        # Fall back to the instance defaults for every unset option.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # BUGFIX: the original condition `do_resize and size is None or resample is None`
        # parsed as `(do_resize and size is None) or resample is None`, raising
        # even when do_resize is False. Parenthesized to match the error message.
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        """Convert model logits into per-image semantic segmentation maps.

        Args:
            outputs: Model output object exposing a ``logits`` tensor of shape
                ``(batch, num_labels, height, width)``.
            target_sizes: Optional per-image ``(height, width)`` targets; when
                given, each logits map is bilinearly resized to it before the
                per-pixel argmax.

        Raises:
            ValueError: If ``target_sizes`` and the logits batch differ in length.
        """
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="""bilinear""", align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 156 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase_ : Tuple = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def __A ( lowerCAmelCase_ ):
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
lowerCAmelCase_ : str = parser.parse_args()
if args.check_lib:
lowerCAmelCase_ : Any = importlib.import_module('''transformers''')
lowerCAmelCase_ : Dict = Path(transformers_module.__file__).parent
else:
lowerCAmelCase_ : str = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 156 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : List[Any] ):
__lowerCAmelCase = checkpoint
__lowerCAmelCase = {}
__lowerCAmelCase = vae_state_dict['encoder.conv_in.weight']
__lowerCAmelCase = vae_state_dict['encoder.conv_in.bias']
__lowerCAmelCase = vae_state_dict['encoder.conv_out.weight']
__lowerCAmelCase = vae_state_dict['encoder.conv_out.bias']
__lowerCAmelCase = vae_state_dict['encoder.norm_out.weight']
__lowerCAmelCase = vae_state_dict['encoder.norm_out.bias']
__lowerCAmelCase = vae_state_dict['decoder.conv_in.weight']
__lowerCAmelCase = vae_state_dict['decoder.conv_in.bias']
__lowerCAmelCase = vae_state_dict['decoder.conv_out.weight']
__lowerCAmelCase = vae_state_dict['decoder.conv_out.bias']
__lowerCAmelCase = vae_state_dict['decoder.norm_out.weight']
__lowerCAmelCase = vae_state_dict['decoder.norm_out.bias']
__lowerCAmelCase = vae_state_dict['quant_conv.weight']
__lowerCAmelCase = vae_state_dict['quant_conv.bias']
__lowerCAmelCase = vae_state_dict['post_quant_conv.weight']
__lowerCAmelCase = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
__lowerCAmelCase = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
__lowerCAmelCase = {
layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the decoder up blocks only
__lowerCAmelCase = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
__lowerCAmelCase = {
layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ )
}
for i in range(lowerCAmelCase_ ):
__lowerCAmelCase = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key]
if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
__lowerCAmelCase = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.weight""" )
__lowerCAmelCase = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.bias""" )
__lowerCAmelCase = renew_vae_resnet_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': F"""down.{i}.block""", 'new': F"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
__lowerCAmelCase = [key for key in vae_state_dict if 'encoder.mid.block' in key]
__lowerCAmelCase = 2
for i in range(1, num_mid_res_blocks + 1 ):
__lowerCAmelCase = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key]
__lowerCAmelCase = renew_vae_resnet_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': F"""mid.block_{i}""", 'new': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
__lowerCAmelCase = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
__lowerCAmelCase = renew_vae_attention_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
conv_attn_to_linear(lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
__lowerCAmelCase = num_up_blocks - 1 - i
__lowerCAmelCase = [
key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key
]
if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
__lowerCAmelCase = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.weight"""
]
__lowerCAmelCase = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.bias"""
]
__lowerCAmelCase = renew_vae_resnet_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': F"""up.{block_id}.block""", 'new': F"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
__lowerCAmelCase = [key for key in vae_state_dict if 'decoder.mid.block' in key]
__lowerCAmelCase = 2
for i in range(1, num_mid_res_blocks + 1 ):
__lowerCAmelCase = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key]
__lowerCAmelCase = renew_vae_resnet_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': F"""mid.block_{i}""", 'new': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
__lowerCAmelCase = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
__lowerCAmelCase = renew_vae_attention_paths(lowerCAmelCase_ )
__lowerCAmelCase = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, additional_replacements=[meta_path], config=lowerCAmelCase_ )
conv_attn_to_linear(lowerCAmelCase_ )
return new_checkpoint
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : str, ):
# Only support V1
__lowerCAmelCase = requests.get(
' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
__lowerCAmelCase = io.BytesIO(r.content )
__lowerCAmelCase = OmegaConf.load(lowerCAmelCase_ )
__lowerCAmelCase = 512
__lowerCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
__lowerCAmelCase = {}
with safe_open(lowerCAmelCase_, framework='pt', device='cpu' ) as f:
for key in f.keys():
__lowerCAmelCase = f.get_tensor(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.load(lowerCAmelCase_, map_location=lowerCAmelCase_ )['state_dict']
# Convert the VAE model.
__lowerCAmelCase = create_vae_diffusers_config(lowerCAmelCase_, image_size=lowerCAmelCase_ )
__lowerCAmelCase = custom_convert_ldm_vae_checkpoint(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = AutoencoderKL(**lowerCAmelCase_ )
vae.load_state_dict(lowerCAmelCase_ )
vae.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
_snake_case : Union[str, Any] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 53 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( lowercase_ , unittest.TestCase):
'''simple docstring'''
lowerCamelCase : List[str] = GPTaTokenizer
lowerCamelCase : Optional[int] = GPTaTokenizerFast
lowerCamelCase : List[Any] = True
lowerCamelCase : List[str] = {"add_prefix_space": True}
lowerCamelCase : Optional[int] = False
def __lowercase ( self ) -> int:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case :Optional[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
__snake_case :Dict = dict(zip(a__ , range(len(a__ ) ) ) )
__snake_case :List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__snake_case :List[str] = {"""unk_token""": """<unk>"""}
__snake_case :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __lowercase ( self , **a__ ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __lowercase ( self , **a__ ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __lowercase ( self , a__ ) -> Optional[int]:
'''simple docstring'''
__snake_case :List[Any] = """lower newer"""
__snake_case :Any = """lower newer"""
return input_text, output_text
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case :Optional[int] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case :List[Any] = """lower newer"""
__snake_case :int = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__snake_case :Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
__snake_case :List[Any] = tokens + [tokenizer.unk_token]
__snake_case :List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case :Union[str, Any] = self.get_tokenizer()
__snake_case :Optional[Any] = self.get_rust_tokenizer(add_prefix_space=a__ )
__snake_case :Optional[int] = """lower newer"""
# Testing tokenization
__snake_case :List[Any] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
__snake_case :int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
__snake_case :List[str] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
__snake_case :int = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
__snake_case :Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=a__ )
__snake_case :Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
__snake_case :Optional[int] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
__snake_case :Dict = tokens + [rust_tokenizer.unk_token]
__snake_case :Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __lowercase ( self , *a__ , **a__ ) -> Any:
'''simple docstring'''
pass
def __lowercase ( self , a__=15 ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case :Optional[int] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
__snake_case :List[str] = """This is a simple input"""
__snake_case :List[str] = ["""This is a simple input 1""", """This is a simple input 2"""]
__snake_case :Tuple = ("""This is a simple input""", """This is a pair""")
__snake_case :Union[str, Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __lowercase ( self ) -> int:
'''simple docstring'''
__snake_case :Union[str, Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
__snake_case :Tuple = """This is a simple input"""
__snake_case :Optional[int] = ["""This is a simple input looooooooong""", """This is a simple input"""]
__snake_case :List[str] = ("""This is a simple input""", """This is a pair""")
__snake_case :List[str] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
__snake_case :Tuple = tokenizer.pad_token_id
__snake_case :int = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
__snake_case :Optional[Any] = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
__snake_case :List[Any] = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
__snake_case :Optional[int] = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :Optional[int] = """$$$"""
__snake_case :Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
__snake_case :List[Any] = """This is a simple input"""
__snake_case :Tuple = ["""This is a simple input 1""", """This is a simple input 2"""]
__snake_case :Tuple = tokenizer.bos_token_id
__snake_case :List[str] = tokenizer(a__ )
__snake_case :Any = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__snake_case :int = tokenizer.decode(out_s.input_ids )
__snake_case :Tuple = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __lowercase ( self ) -> str:
'''simple docstring'''
pass
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case :List[str] = [self.get_tokenizer(do_lower_case=a__ , add_bos_token=a__ )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__snake_case :Tuple = """Encode this."""
__snake_case :Tuple = """This one too please."""
__snake_case :Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
encoded_sequence += tokenizer.encode(a__ , add_special_tokens=a__ )
__snake_case :List[str] = tokenizer.encode_plus(
a__ , a__ , add_special_tokens=a__ , return_special_tokens_mask=a__ , )
__snake_case :Union[str, Any] = encoded_sequence_dict["""input_ids"""]
__snake_case :Optional[Any] = encoded_sequence_dict["""special_tokens_mask"""]
self.assertEqual(len(a__ ) , len(a__ ) )
__snake_case :Optional[int] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(a__ )
]
__snake_case :str = [x for x in filtered_sequence if x is not None]
self.assertEqual(a__ , a__ )
@require_tokenizers
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :Union[str, Any] = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=a__ )
__snake_case :Union[str, Any] = """A photo of a cat"""
__snake_case :int = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("""test_opt""" )
__snake_case :int = AutoTokenizer.from_pretrained("""./test_opt""" )
__snake_case :Tuple = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 2_50, 13_45, 9, 10, 47_58] )
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :Tuple = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=a__ )
__snake_case :str = """A photo of a cat"""
__snake_case :List[str] = tokenizer.encode(
a__ , )
# Same as above
self.assertEqual(a__ , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :int = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=a__ )
__snake_case :Optional[Any] = """bos"""
__snake_case :int = tokenizer.get_vocab()["""bos"""]
__snake_case :str = """A photo of a cat"""
__snake_case :int = tokenizer.encode(
a__ , )
# We changed the bos token
self.assertEqual(a__ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("""./tok""" )
__snake_case :Dict = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
__snake_case :List[Any] = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
| 455 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : Tuple = logging.get_logger(__name__)
def snake_case ( snake_case : Dict , snake_case : List[str]=False ) -> Dict:
"""simple docstring"""
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
lowerCAmelCase = 'segformer.encoder.' + key
if key.startswith('backbone' ):
lowerCAmelCase = key.replace('backbone' , 'segformer.encoder' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(snake_case )-1}' )
if "norm" in key:
lowerCAmelCase = key.replace('norm' , 'layer_norm' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
lowerCAmelCase = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(snake_case )-1}' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find('block' ) + len('block' )]
lowerCAmelCase = key.replace(F'block{idx}' , F'block.{int(snake_case )-1}' )
if "attn.q" in key:
lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase = key.replace(F'linear_c{idx}' , F'linear_c.{int(snake_case )-1}' )
if key.startswith('head' ):
lowerCAmelCase = key.replace('head' , 'classifier' )
lowerCAmelCase = value
return new_state_dict
def snake_case ( snake_case : str , snake_case : int ) -> Tuple:
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
lowerCAmelCase = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[
config.hidden_sizes[i] :
]
def snake_case ( ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return image
@torch.no_grad()
def snake_case(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Convert an original SegFormer (semantic segmentation) or MiT (encoder-only
    image classification) checkpoint to the HuggingFace format.

    The mangled original collapsed every local binding to ``lowerCAmelCase`` and gave
    all three parameters the same name (a SyntaxError); this restores the bindings
    implied by the names the body actually reads (``model_name``, ``size``,
    ``encoder_only``, ``state_dict``, ``expected_slice`` ...). All literals
    (label files, size tables, expected logit slices) are unchanged.

    Args:
        model_name: original checkpoint name, e.g. ``segformer.b0.512x512.ade.160k``.
        checkpoint_path: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes (id2label mapping fetched from the hub)
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass  # b0 uses the SegformerConfig defaults
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict; segmentation checkpoints nest it under "state_dict"
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        # the segmentation head's conv_seg is replaced by the HF classifier
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        )
    elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
                [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
                [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
            ]
        )
    elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
                [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
                [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
            ]
        )
    elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
                [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
                [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
            ]
        )
    elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
                [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
                [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
            ]
        )
    elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
                [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
                [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
            ]
        )
    # Cityscapes checkpoints
    elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
                [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
                [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
            ]
        )
    elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
                [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
                [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
            ]
        )
    elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [
                    [-1.1372e01, -1.2787e01, -1.3477e01],
                    [-1.2536e01, -1.4194e01, -1.4409e01],
                    [-1.3217e01, -1.4888e01, -1.5327e01],
                ],
                [
                    [-1.4791e01, -1.7122e01, -1.8277e01],
                    [-1.7163e01, -1.9192e01, -1.9533e01],
                    [-1.7897e01, -1.9991e01, -2.0315e01],
                ],
                [
                    [7.6723e-01, 4.1921e-01, -7.7878e-02],
                    [4.7772e-01, 9.5557e-03, -2.8082e-01],
                    [3.6032e-01, -2.4826e-01, -5.1168e-01],
                ],
            ]
        )
    elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
            [
                [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
                [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
                [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
            ]
        )
    elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        )
    elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
                [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
                [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
            ]
        )
    elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
                [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
                [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
            ]
        )
    elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
                [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
                [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
            ]
        )
    elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
                [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
                [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
            ]
        )
    else:
        # encoder-only (MiT) checkpoints: no reference slice, just report the prediction
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits against the reference slice for segmentation checkpoints
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point. The mangled original bound the parser and the parsed args to
    # ``_UpperCamelCase`` while reading ``parser``/``args`` (NameError), and called
    # ``convert_segformer_checkpoint``, which is not defined in this file — the
    # conversion function is (mis)named ``snake_case`` here, so call it by that name.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="segformer.b0.512x512.ade.160k",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    snake_case(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 702 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# NOTE(review): the mangling pass collapsed these four module constants onto the
# single name ``_UpperCamelCase``, so the tokenizer class below could never resolve
# ``logger``, ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP`` or
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``. Restored to the names the class reads.
logger = logging.get_logger(__name__)

# Name of the SentencePiece model file inside each checkpoint repo.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Hub URL of the vocab file for every published GPT-SW3 checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

# Maximum model input length (positional-embedding budget) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
# NOTE(review): GPT-SW3 SentencePiece tokenizer, heavily damaged by the mangling
# pass: the base class ``a_`` is undefined (presumably PreTrainedTokenizer, imported
# above — confirm); the four class attributes all share one name so only the last
# assignment survives; every method is named ``_SCREAMING_SNAKE_CASE``; and locals /
# instance attributes were collapsed to ``lowerCAmelCase``, so state such as
# ``self.sp_model`` is never actually stored even though later lines read it.
# Comments below describe what each line *intends*; the code is left byte-identical.
class _snake_case ( a_ ):
    # Intended names (in order): vocab_files_names, pretrained_vocab_files_map,
    # max_model_input_sizes, model_input_names — TODO restore before use.
    SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE : List[str] = ['''input_ids''', '''attention_mask''']
    # NOTE(review): duplicate parameter names below are a SyntaxError as written.
    # Presumed originals: vocab_file, do_lower_case, remove_space, keep_accents,
    # pad_token, unk_token, eos_token, bos_token, sp_model_kwargs.
    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
        """Load the SentencePiece model and pick model-dependent default special tokens."""
        lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        lowerCAmelCase = kwargs.get('name_or_path' )
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' you are testing the model, this can safely be ignored' )
            lowerCAmelCase = 'None'
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        lowerCAmelCase = '<|endoftext|>' if eos_token is None else eos_token
        lowerCAmelCase = '<unk>' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # the 7b checkpoint has no dedicated pad/bos tokens
            lowerCAmelCase = unk_token if pad_token is None else pad_token
            lowerCAmelCase = eos_token if bos_token is None else bos_token
        else:
            lowerCAmelCase = '<pad>' if pad_token is None else pad_token
            lowerCAmelCase = '<s>' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
        # Intended: self.do_lower_case / remove_space / keep_accents / vocab_file / sp_model.
        lowerCAmelCase = do_lower_case
        lowerCAmelCase = remove_space
        lowerCAmelCase = keep_accents
        lowerCAmelCase = vocab_file
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_SCREAMING_SNAKE_CASE )
        # Used for whitespace normalization in input texts
        # fmt : off
        # NOTE(review): the set below originally contained distinct Unicode space
        # characters (NBSP, thin space, ...) that may have been lost in transit.
        lowerCAmelCase = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        lowerCAmelCase = re.compile(
            F'[{"".join(map(_SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]' )
    def __getstate__( self ):
        """Drop the (unpicklable) SentencePiece processor when pickling."""
        lowerCAmelCase = self.__dict__.copy()
        lowerCAmelCase = None
        return state
    def __setstate__( self , _SCREAMING_SNAKE_CASE ):
        """Restore state and re-load the SentencePiece model after unpickling."""
        lowerCAmelCase = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            lowerCAmelCase = {}
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def _SCREAMING_SNAKE_CASE ( self ):
        """Vocabulary size = number of pieces in the SentencePiece model."""
        return len(self.sp_model )
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
        """Strip non-printing characters, normalize whitespace, then NFC-normalize."""
        lowerCAmelCase = self.non_printing_characters_re.sub('' , _SCREAMING_SNAKE_CASE )
        # Normalize whitespaces
        lowerCAmelCase = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
        # NFC Unicode normalization
        lowerCAmelCase = unicodedata.normalize('NFC' , _SCREAMING_SNAKE_CASE )
        return text
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
        """Tokenize: preprocess the text, then encode with SentencePiece."""
        lowerCAmelCase = self.preprocess_text(_SCREAMING_SNAKE_CASE )
        return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
        """Token (piece) -> id via the SentencePiece model."""
        return self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
        """Id -> token (piece) via the SentencePiece model."""
        return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
    @staticmethod
    def _SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
        """Identity pass-through (the decoded string needs no post-processing)."""
        return out_string
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
        """Join tokens back into a string, decoding special tokens outside SentencePiece."""
        lowerCAmelCase = []
        lowerCAmelCase = ''
        lowerCAmelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
                lowerCAmelCase = True
                lowerCAmelCase = []
            else:
                current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
                lowerCAmelCase = False
        out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
        return out_string
    def _SCREAMING_SNAKE_CASE ( self ):
        """Full vocab mapping (token -> id), including added tokens."""
        lowerCAmelCase = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
        """Save the SentencePiece model file into ``save_directory``; returns the path tuple."""
        if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowerCAmelCase = os.path.join(
            _SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
            # copy the existing model file when it is a real on-disk file
            copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
        elif not os.path.isfile(self.vocab_file ):
            # otherwise serialize the in-memory model
            with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fi:
                lowerCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(_SCREAMING_SNAKE_CASE )
        return (out_vocab_file,)
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
        """Fast encode: str or list[str] -> token ids, optionally as a torch tensor."""
        if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            lowerCAmelCase = self.preprocess_text(_SCREAMING_SNAKE_CASE )
            lowerCAmelCase = self.sp_model.encode(_SCREAMING_SNAKE_CASE )
        else:
            lowerCAmelCase = [self.preprocess_text(_SCREAMING_SNAKE_CASE ) for t in text]
            lowerCAmelCase = self.sp_model.encode(_SCREAMING_SNAKE_CASE )
        if return_tensors is True or return_tensors == "pt":
            lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE )
        return token_ids
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
        """Fast decode: token ids -> text, straight through SentencePiece."""
        return self.sp_model.decode(_SCREAMING_SNAKE_CASE )
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
        """Render a chat Conversation as a 'User:'/'Bot:' prompt and encode it."""
        lowerCAmelCase = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
        lowerCAmelCase = (
            F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(_SCREAMING_SNAKE_CASE ) + F'{self.bos_token}Bot:'
        )
        return self.encode(text=_SCREAMING_SNAKE_CASE )
| 514 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
# NOTE(review): OpenLlama model-tester helper, damaged by the mangling pass: the
# ``__init__`` parameters all share the name ``A_`` (a SyntaxError as written) and
# every local/attribute binding was collapsed to ``UpperCamelCase``, so attributes
# such as ``self.batch_size`` are never stored even though other methods read them.
# Comments describe the intended behaviour; the code is left byte-identical.
class SCREAMING_SNAKE_CASE__ :
    # Presumed original parameters (in order): parent, batch_size=13, seq_length=7,
    # is_training, use_input_mask, use_token_type_ids, use_labels, vocab_size=99,
    # hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
    # intermediate_size=37, hidden_act, hidden_dropout_prob,
    # attention_probs_dropout_prob, max_position_embeddings=512,
    # type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
    # num_labels=3, num_choices=4, scope=None — TODO restore before use.
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , )-> Optional[Any]:
        """Store the test hyper-parameters on the instance (bindings currently lost)."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = seq_length
        UpperCamelCase = is_training
        UpperCamelCase = use_input_mask
        UpperCamelCase = use_token_type_ids
        UpperCamelCase = use_labels
        UpperCamelCase = vocab_size
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = type_vocab_size
        UpperCamelCase = type_sequence_label_size
        UpperCamelCase = initializer_range
        UpperCamelCase = num_labels
        UpperCamelCase = num_choices
        UpperCamelCase = scope
    def UpperCAmelCase_ ( self )-> int:
        """Build random input ids / masks / labels plus a config for one test case."""
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase = None
        if self.use_input_mask:
            UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase = None
        if self.use_token_type_ids:
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase = None
        UpperCamelCase = None
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def UpperCAmelCase_ ( self )-> Dict:
        """Construct an OpenLlamaConfig mirroring the tester's hyper-parameters."""
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , use_stable_embedding=A_ , )
    def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ )-> str:
        """Forward the base model and check the last_hidden_state shape."""
        UpperCamelCase = OpenLlamaModel(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase = model(A_ , attention_mask=A_ )
        UpperCamelCase = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , )-> Tuple:
        """Forward the model as a cross-attending decoder and check output shape."""
        UpperCamelCase = True
        UpperCamelCase = OpenLlamaModel(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase = model(
            A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
        UpperCamelCase = model(
            A_ , attention_mask=A_ , encoder_hidden_states=A_ , )
        UpperCamelCase = model(A_ , attention_mask=A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , )-> str:
        """Forward the causal-LM head with labels and check the logits shape."""
        UpperCamelCase = OpenLlamaForCausalLM(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , )-> Any:
        """Check that decoding with a past_key_values cache matches full re-computation."""
        UpperCamelCase = True
        UpperCamelCase = True
        UpperCamelCase = OpenLlamaForCausalLM(config=A_ )
        model.to(A_ )
        model.eval()
        # first forward pass
        UpperCamelCase = model(
            A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , )
        UpperCamelCase = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
        UpperCamelCase = model(
            A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['hidden_states'][0]
        UpperCamelCase = model(
            A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['hidden_states'][0]
        # select random slice
        UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
        UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) )
    def UpperCAmelCase_ ( self )-> Optional[int]:
        """Repackage prepare_config_and_inputs() into (config, inputs_dict) for common tests."""
        UpperCamelCase = self.prepare_config_and_inputs()
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) = config_and_inputs
        UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
# NOTE(review): OpenLlama test suite, damaged by the mangling pass: the three mixin
# base classes are all named ``snake_case_`` (undefined — presumably
# ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, imported above);
# class attributes collapsed onto ``lowerCAmelCase_`` so only the last assignment
# survives; and locals collapsed to ``UpperCamelCase``, so reads such as
# ``config_and_inputs`` reference names that are never bound. Code left byte-identical.
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase):
    # Intended attribute names (in order): all_model_classes, all_generative_model_classes,
    # pipeline_model_mapping, test_headmasking, test_pruning — TODO restore.
    lowerCAmelCase_ = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    lowerCAmelCase_ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    lowerCAmelCase_ = (
        {
            """feature-extraction""": OpenLlamaModel,
            """text-classification""": OpenLlamaForSequenceClassification,
            """text-generation""": OpenLlamaForCausalLM,
            """zero-shot""": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    def UpperCAmelCase_ ( self )-> Union[str, Any]:
        """Set up the model tester and the config tester (bindings currently lost)."""
        UpperCamelCase = OpenLlamaModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=A_ , hidden_size=37 )
    def UpperCAmelCase_ ( self )-> Tuple:
        """Run the generic config sanity checks."""
        self.config_tester.run_common_tests()
    def UpperCAmelCase_ ( self )-> List[Any]:
        """Basic model forward-pass shape test."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )
    def UpperCAmelCase_ ( self )-> Optional[int]:
        """Model test across every positional-embedding type."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCamelCase = type
            self.model_tester.create_and_check_model(*A_ )
    def UpperCAmelCase_ ( self )-> Tuple:
        """Sequence-classification head with regression-style labels."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = 3
        UpperCamelCase = input_dict['input_ids']
        UpperCamelCase = input_ids.ne(1 ).to(A_ )
        UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        UpperCamelCase = OpenLlamaForSequenceClassification(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def UpperCAmelCase_ ( self )-> Tuple:
        """Sequence-classification head in single-label mode."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = 3
        UpperCamelCase = 'single_label_classification'
        UpperCamelCase = input_dict['input_ids']
        UpperCamelCase = input_ids.ne(1 ).to(A_ )
        UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        UpperCamelCase = OpenLlamaForSequenceClassification(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def UpperCAmelCase_ ( self )-> Union[str, Any]:
        """Sequence-classification head in multi-label mode."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = 3
        UpperCamelCase = 'multi_label_classification'
        UpperCamelCase = input_dict['input_ids']
        UpperCamelCase = input_ids.ne(1 ).to(A_ )
        UpperCamelCase = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        UpperCamelCase = OpenLlamaForSequenceClassification(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
    def UpperCAmelCase_ ( self )-> Any:
        """Skipped: model buffers contain complex numbers (see decorator)."""
        pass
    @parameterized.expand([('linear',), ('dynamic',)] )
    def UpperCAmelCase_ ( self , A_ )-> Union[str, Any]:
        """RoPE scaling: scaled vs. unscaled models must differ on long inputs; dynamic
        scaling must match on short inputs, linear scaling must not."""
        UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase = ids_tensor([1, 10] , config.vocab_size )
        UpperCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        UpperCamelCase = OpenLlamaModel(A_ )
        original_model.to(A_ )
        original_model.eval()
        UpperCamelCase = original_model(A_ ).last_hidden_state
        UpperCamelCase = original_model(A_ ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        UpperCamelCase = {'type': scaling_type, 'factor': 10.0}
        UpperCamelCase = OpenLlamaModel(A_ )
        scaled_model.to(A_ )
        scaled_model.eval()
        UpperCamelCase = scaled_model(A_ ).last_hidden_state
        UpperCamelCase = scaled_model(A_ ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(A_ , A_ , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) )
| 3 |
from typing import Dict
from .base import GenericTensor, Pipeline
# NOTE(review): feature-extraction pipeline, damaged by the mangling pass: the base
# class ``SCREAMING_SNAKE_CASE`` is undefined (the import above provides Pipeline —
# presumably the intended base), the first three methods share the name
# ``SCREAMING_SNAKE_CASE_`` so only the last survives, their parameters are all
# named ``a_`` (duplicate parameter names are a SyntaxError as written), and locals
# were collapsed to ``lowerCAmelCase__``. Intended method names, in order:
# _sanitize_parameters, preprocess, _forward, postprocess. Code left byte-identical.
class __snake_case ( SCREAMING_SNAKE_CASE ):
    def SCREAMING_SNAKE_CASE_ ( self ,a_=None ,a_=None ,a_=None ,**a_ ):
        """Split caller kwargs into (preprocess_params, forward_params, postprocess_params)."""
        if tokenize_kwargs is None:
            lowerCAmelCase__ = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            lowerCAmelCase__ = truncation
        lowerCAmelCase__ = tokenize_kwargs
        lowerCAmelCase__ = {}
        if return_tensors is not None:
            lowerCAmelCase__ = return_tensors
        return preprocess_params, {}, postprocess_params
    def SCREAMING_SNAKE_CASE_ ( self ,a_ ,**a_ ):
        """Tokenize the raw input into framework tensors."""
        lowerCAmelCase__ = self.framework
        lowerCAmelCase__ = self.tokenizer(a_ ,return_tensors=a_ ,**a_ )
        return model_inputs
    def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
        """Run the model on the tokenized inputs."""
        lowerCAmelCase__ = self.model(**a_ )
        return model_outputs
    def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=False ):
        """Return the first output tensor, as a tensor or nested Python lists."""
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self ,*a_ ,**a_ ):
        """Delegate to the base pipeline's __call__ (preprocess -> forward -> postprocess)."""
        return super().__call__(*a_ ,**a_ )
| 193 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger. The config class below calls ``logger.info``, but the mangled
# original bound this to ``_snake_case``, leaving ``logger`` undefined — restore it.
logger = logging.get_logger(__name__)
class _UpperCAmelCase ( _UpperCamelCase ):
    """Composite encoder-decoder model configuration.

    Wraps two sub-configs under ``encoder`` and ``decoder``. The mangled original
    collapsed the class attributes onto ``a_`` and every binding onto
    ``__lowerCAmelCase``, so ``self.encoder``/``self.decoder`` were never stored;
    those bindings are restored here.

    NOTE(review): the base-class name ``_UpperCamelCase`` is itself mangled
    (presumably PretrainedConfig, imported above — confirm), and both methods below
    are mangled to the same name ``lowercase`` — the second definition shadows the
    first, so the classmethod constructor is unreachable until they get their real
    names (from_encoder_decoder_configs / to_dict).
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs) -> None:
        """Build the composite config; requires ``encoder`` and ``decoder`` dicts in kwargs."""
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Local import to avoid a circular dependency with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def lowercase(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        """Instantiate from two sub-configs, marking the decoder as a cross-attending decoder."""
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def lowercase(self):  # noqa: F811 — mangled-name collision kept for interface parity (intended: to_dict)
        """Serialize to a plain dict, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 720 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Make CUDA/cuDNN kernels deterministic so the pixel-level slices asserted
# below are reproducible across runs.
enable_full_determinism()
@slow
@require_torch_gpu
class _UpperCAmelCase(unittest.TestCase):
    """Slow GPU integration tests for ``StableDiffusionKDiffusionPipeline``.

    Fixes: all four methods shared one placeholder name (shadowing each
    other, so unittest would have run none of them) and every local was
    collapsed to one name, leaving `sd_pipe`, `image_slice`, etc. undefined.
    Method names restored to the unittest conventions the runner requires.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        # NOTE(review): 5e-1 tolerance is very loose but kept from the original.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigma(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_dpmpp_2m')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type='np',
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 421 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
# Names restored: the decorator and MetricInfo below already reference
# _CITATION / _DESCRIPTION / _KWARGS_DESCRIPTION, which the obfuscated
# `lowercase__` alias left undefined.  String values are unchanged.
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'

_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'

_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowerCAmelCase(datasets.Metric):
    """Accuracy on the MATH dataset after canonicalizing LaTeX answers.

    Fixes: both methods shared one placeholder name (shadowing), and
    ``_compute`` read the undefined names ``n_correct``/``accuracy``.
    Method names restored to the ``datasets.Metric`` hooks the library calls.
    """

    def _info(self):
        # Describes the metric's inputs/outputs for the `datasets` library.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string'),
                    'references': datasets.Value('string'),
                }
            ),
            homepage='https://github.com/hendrycks/math',
            codebase_urls=['https://github.com/hendrycks/math'],
        )

    def _compute(self, predictions, references):
        # Count pairs judged equivalent by hendrycks/math's canonicalizer.
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 98 |
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Search *item* in an ascending *sorted_collection* by interpolation.

    Returns the index of a match, or ``None`` if absent.

    Fixes: the original declared two parameters with the same name (a
    SyntaxError) and its body referenced never-defined locals; names are
    restored from the body's own references, and the function is renamed to
    match the call in the `__main__` block below.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            # interpolation overshot the window: clamp and retry
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive interpolation search over ``sorted_collection[left:right+1]``.

    Returns the index of *item* or ``None``.

    Fixes: the original declared four identically named parameters (a
    SyntaxError); the recursive calls in the body already used this
    function name, which grounds the rename.
    """
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ValueError unless *collection* is ascending sorted; else return True.

    Fixes: the original compared an undefined global against its parameter;
    renamed to match the call in the `__main__` block below.
    """
    if collection != sorted(collection):
        raise ValueError('''Collection must be ascending sorted''')
    return True
if __name__ == "__main__":
    import sys

    # Demo driver.  The collection is defined unconditionally: the original
    # only created it under `debug == 1`, so the search below raised a
    # NameError in the default configuration.
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(F'''{target} found at positions: {result}''')
    else:
        print('Not found')
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Window width below which the ternary searches fall back to a linear scan;
# the search functions below already read this as `precision`.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan of ``array[left:right]``; return the index of *target* or -1.

    Fixes: the original declared four identically named parameters (a
    SyntaxError); renamed to match the calls in the ternary searches below.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search for *target* in ascending *array*.

    Falls back to `lin_search` once the window is narrower than the
    module-level `precision`.  Returns an index or -1.

    Fixes: duplicate parameter names (SyntaxError) and locals collapsed to
    one name, leaving `left`/`right`/`one_third`/`two_third` undefined.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over ``array[left:right]``.

    Falls back to `lin_search` when the window is narrower than the
    module-level `precision`.  Returns an index or -1.

    Fixes: four identically named parameters (SyntaxError); names restored
    from the body's references and the call in the `__main__` block below.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Interactive driver: the original reassigned one mangled name for every
    # input/result, so `user_input`, `collection`, and the second result were
    # undefined or lost; both searches now get their own result variable.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"""Iterative search: {target} found at positions: {result1}""")
        print(f"""Recursive search: {target} found at positions: {result2}""")
    else:
        print("Not found")
| 713 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Names restored: the functions below already read NON_ALPHA, MIN_NUM_TOKENS
# and NUM_PERM, which the obfuscated `_snake_case` alias left undefined.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")

# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens):
    """Compute a MinHash of a token list; None if fewer than MIN_NUM_TOKENS.

    Fixes: the original passed the token list itself as ``num_perm`` instead
    of the NUM_PERM constant, updated a mangled local, and returned the
    undefined name ``min_hash``.
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """Split *code* on non-identifier characters, dropping blank pieces.

    Renamed: the similarity function below already calls `get_tokens`, and
    the original placeholder name collided with every sibling function.
    """
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """MinHash-LSH index that groups near-duplicate files into clusters.

    Renamed from the placeholder class name: the functions below construct
    it as ``DuplicationIndex(duplication_jaccard_threshold=...)`` and call
    ``add`` / ``get_duplicate_clusters``.  Fixes: the ``__init__``
    self-assignments had been collapsed into throwaway locals and the
    cluster map was built as ``defaultdict(<threshold>)`` instead of
    ``defaultdict(set)``.
    """

    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # base element -> set of elements that are near-duplicates of it
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash):
        """Insert an ``(index, repo_name, path)`` key with its MinHash,
        attaching it to an existing cluster when a close duplicate is found."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F'Duplicate key {code_key}')
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # no known cluster base: start one at the first close duplicate
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        """Return clusters as lists of {base_index, repo_name, path} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath):
        """Dump the clusters to *filepath* as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, 'w') as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Pool worker: hash one ``(index, row)`` pair.

    Returns ``((index, repo_name, path), min_hash)`` or None for short files.
    Fixes: the unpacked tuple and the hash were assigned to one mangled
    local, so ``min_hash`` / ``index`` / ``data`` were undefined.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """Yield ``(key, MinHash)`` pairs computed in a multiprocessing pool.

    Renamed to match the call in the cluster builder below.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Index every file of the dataset and return its duplicate clusters.

    Fixes: the DuplicationIndex instance was bound to a mangled local, so
    the subsequent ``di.add`` calls referenced an undefined name; duplicate
    parameter names also made the original a SyntaxError.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1, code2):
    """Jaccard similarity between the token sets of two code strings.

    Fixes: both token sets were assigned to one collapsed name, which made
    the result identically 1.0; duplicate parameter names were a SyntaxError.
    """
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
# Dataset shared with forked pool workers; `find_extremes` publishes it via
# `global _shared_dataset` (the obfuscated alias left that name undefined).
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one cluster to its 'extremes' (mutually dissimilar members).

    Each kept element carries a ``copies`` count of how many cluster members
    it subsumes.  Reads the module global ``_shared_dataset`` for file
    contents.  Fixes: both loop variables had been collapsed into one name,
    so the inner comparison compared an element with itself.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Map `_find_cluster_extremes_shared` over clusters with a process pool.

    The dataset is published through the module global so forked workers can
    read it without pickling it per task.  Fixes: the global assignment and
    the partial were bound to mangled locals, leaving the pool call broken.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85):
    """Remove near-duplicate files from *dataset*, keeping cluster 'extremes'.

    Returns ``(filtered_dataset, duplicate_clusters)`` where each cluster
    element is annotated with ``is_extreme`` and, for extremes, ``copies``.

    Fixes: locals collapsed to one name and a filter lambda with two
    identically named parameters (a SyntaxError).
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    # Drop every duplicate that is not a cluster extreme.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element['base_index']]['copies']

    print(F'Original dataset size: {len(dataset)}')
    print(F'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(F'Files in duplicate cluster: {len(duplicate_indices)}')
    print(F'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(F'Filtered dataset size: {len(ds_filter)}')

    return ds_filter, duplicate_clusters
| 659 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    """Builds small DistilBert configs and inputs for the model tests below.

    Renamed: the test suite instantiates ``FlaxDistilBertModelTester``.
    Fixes: every ``self.<attr> = <arg>`` assignment had been collapsed into
    one throwaway local, and the two prepare methods shared one name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the common Flax model-tester suite over the DistilBert variants.

    Fixes: the mixin base was an unresolved placeholder (restored to
    ``FlaxModelTesterMixin``, imported above), the class-attribute name was
    mangled although the test below reads ``self.all_model_classes``, and
    the two methods shared one name.
    """

    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test checking real pretrained-model outputs."""

    @slow
    def test_inference_no_head(self):
        # Fixes: the model, inputs and output were all assigned to one
        # collapsed local, leaving the later references undefined.
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 685 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase(
    grid,
    source,
    destination,
    allow_diagonal,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Dijkstra shortest path on a binary grid (cells == 1 are passable,
    every step costs 1).

    Returns ``(distance, path)`` with the path including both endpoints, or
    ``(np.inf, [])`` when *destination* is unreachable from *source*.

    Fixes: the original declared four identically named parameters (a
    SyntaxError) and collapsed all locals into one name; names are restored
    from the body's own references (grid.shape, heappop on queue, etc.).
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            # Walk the predecessor chain back to the source.
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if the string form of *n* reads the same reversed.

    Fixes: the original assigned ``str(n)`` to a mangled local and then
    compared the undefined name ``n`` (NameError); renamed to match the
    call in `solution` below.
    """
    s = str(n)
    return s == s[::-1]
def solution(limit: int = 1000000) -> int:
    """Project Euler 36: sum of numbers below *limit* that are palindromic
    in both base 10 and base 2.

    Fixes: the running sum was initialised under a mangled name, so the
    ``total += i`` and ``return total`` lines referenced an undefined name;
    renamed to match the call in the `__main__` block below.
    """
    total = 0
    for i in range(1, limit):
        # bin(i) is "0b...", so take everything after the "b".
        if is_palindrome(i) and is_palindrome(bin(i).split("""b""")[1]):
            total += i
    return total
if __name__ == "__main__":
    # Read an integer limit from stdin and print the double-base palindrome sum.
    print(solution(int(str(input().strip()))))
| 339 |
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """Binary-tree node; `data` holds the key value.

    Renamed from the placeholder class name: the field annotations below and
    the ``isinstance(node, TreeNode)`` check in the validator already refer
    to ``TreeNode``.
    """

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def lowercase__(tree: TreeNode | None) -> bool:
    """Return True iff *tree* is a valid binary search tree.

    Raises ValueError when any node is not a TreeNode whose data coerces to
    float.  Fixes: the type check had both arguments collapsed to one name
    (``isinstance(node, node)`` — a runtime TypeError), and the recursive
    bound arguments were likewise collapsed.
    """

    def is_valid_tree(node: TreeNode | None) -> bool:
        # Structural/type validation before the ordering check.
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError(
            """Each node should be type of TreeNode and data should be float.""")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        # Every node must lie strictly inside its inherited (left, right) window.
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree, -float("""inf""" ), float("""inf""" ))
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 339 | 1 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

# Names restored: the tokenizer's class attributes below already reference
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which the obfuscated alias left
# undefined.  Values are unchanged.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/esm2_t6_8M_UR50D': 1024,
    'facebook/esm2_t12_35M_UR50D': 1024,
}
def load_vocab_file(vocab_file):
    """Read one token per line from *vocab_file*, stripping whitespace.

    Fixes: the lines list was assigned to a mangled local, so the return
    expression referenced an undefined ``lines``; renamed to match the call
    in the tokenizer's ``__init__`` below.
    """
    with open(vocab_file, 'r') as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __lowerCAmelCase(PreTrainedTokenizer):
    """ESM protein-sequence tokenizer (upstream ``EsmTokenizer``): splits on
    whitespace and looks tokens up in a fixed vocabulary file.

    Fixes: the base-class placeholder did not resolve (restored to
    ``PreTrainedTokenizer``, imported above); the four class attributes and
    all methods shared single placeholder names, shadowing each other; two
    method signatures repeated a parameter name (SyntaxErrors); and the
    ``__init__`` self-assignments were collapsed into throwaway locals.
    Method/attribute names are restored to the ``PreTrainedTokenizer`` hooks
    the base class dispatches to.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # ESM sequences are plain residue strings; whitespace is the delimiter.
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
        with open(vocab_file, 'w') as f:
            f.write('\n'.join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self):
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
| 9 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Sum the element counts of all parameters with ``requires_grad=True``.

    Fixes: both intermediates were assigned to one mangled local, so
    ``model_parameters`` and ``params`` were undefined; renamed to match
    the call in the logging callback below.
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
# Module-level logger; the callback below already reads it as `logger`, which
# the obfuscated alias left undefined.
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that keeps the best `val_<metric>` checkpoint.

    Raises NotImplementedError for unsupported metrics.  Fixes: the original
    declared two identically named parameters (a SyntaxError) and collapsed
    the filename-pattern local, so ``exp`` was undefined.
    """
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            ' function.')

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f'''val_{metric}''', mode='max', save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping monitor on `val_<metric>`.

    Minimizes loss-like metrics, maximizes everything else.  Fixes: the
    original declared two identically named parameters (a SyntaxError).
    """
    return EarlyStopping(
        monitor=f'''val_{metric}''', mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
class __lowerCAmelCase(pl.Callback):
    """Lightning callback that logs learning rates and parameter counts and
    writes validation/test metrics and generations to the output directory
    (upstream ``Seq2SeqLoggingCallback``).

    Fixes: the five methods all shared one placeholder name, shadowing each
    other and breaking Lightning's hook dispatch; one signature repeated a
    parameter name (a SyntaxError); and the locals were collapsed into one
    mangled name.  Method names are restored to the ``pl.Callback`` hooks.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the per-param-group learning rates after every batch.
        lrs = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        """Write callback metrics (and optionally generations) to text files."""
        logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''')
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
            generations_file = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = F'''{key}: {val:.6f}\n'''
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            # the module may not wrap an inner .model
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 9 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored: the config class below already calls `logger.warning`, which the
# obfuscated alias left undefined.  The archive-map name follows the
# transformers convention — NOTE(review): not referenced in this view,
# confirm against upstream configuration_bridgetower.py.
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
    'BridgeTower/bridgetower-base-itm-mlm': (
        'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
    ),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    """Vision-tower configuration for BridgeTower models.

    Fixes: the base class was the unresolved placeholder ``A`` (restored to
    ``PretrainedConfig``, imported above); the file declared two classes
    under one placeholder name, so this one is renamed per its
    ``model_type``; and the ``__init__`` self-assignments were collapsed
    into throwaway locals.
    """

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1E-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # NOTE(review): reading `text_config` inside the *vision* config looks
        # suspicious but is kept byte-identical to the original — confirm
        # against upstream before changing.
        if config_dict.get("""model_type""") == "bridgetower":
            config_dict = config_dict["""text_config"""]

        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration of the BridgeTower text encoder (RoBERTa-style).

    NOTE(review): class name, `model_type` attribute, and the `self.*`
    assignments are restored from the obfuscated dump; the name matches the
    `BridgeTowerTextConfig(**text_config)` reference later in this file.
    """

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a composite bridgetower config."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    """Composite BridgeTower configuration holding a text and a vision sub-config.

    NOTE(review): restored from the obfuscated dump — the constructor stored
    nothing on `self`, `to_dict` wrote to locals instead of the output dict,
    and `from_text_vision_configs` referenced parameters that did not exist.
    """

    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # Legacy hub files may carry these keys; drop them before the base init.
        kwargs.pop("text_config_dict", None)
        kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 458 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import structure: keys are submodules of this package, values the
# public names they export.  Optional-backend entries are appended below.
# (The obfuscated dump rebound one variable instead of adding dict keys,
# and referenced the undefined name `_import_structure` at the bottom.)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 458 | 1 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read a boolean flag named *key* from the environment.

    Returns *default* unchanged when the variable is unset; otherwise returns
    1/0 for truthy/falsy spellings and raises ValueError for anything else.
    (The original returned the undefined name `_value`, and its name is
    restored from the `parse_flag_from_env("RUN_SLOW", ...)` call sites.)
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    # Mirror distutils.util.strtobool, which was removed in Python 3.12.
    normalized = value.lower()
    if normalized in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if normalized in ("n", "no", "f", "false", "off", "0"):
        return 0
    # More values are supported, but let's keep the message simple.
    raise ValueError(f"If set, {key} must be yes or no.")
# Run-mode flags.  Names are restored from the `_run_slow_tests` /
# `_run_local_tests` / ... references in the decorators below (the dump
# reassigned a single `__A` variable eleven times).
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# NOTE(review): marker names below are reconstructed from their `reason`
# strings — confirm against callers.
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    """Skip *test_case* unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Skip *test_case* unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip *test_case* unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip *test_case* unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Skip *test_case* unless PyTorch is available per datasets.config."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Skip *test_case* unless TensorFlow is available per datasets.config."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Skip *test_case* unless JAX is available per datasets.config."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Skip *test_case* unless Pillow is available per datasets.config."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip *test_case* unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip *test_case* unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip *test_case* unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator factory: skip a test unless spacy and *model* are available.

    The original shadowed the model name with the test-case parameter, so the
    skip message interpolated the wrong object.
    """

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip *test_case* unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip *test_case* unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    """Skip *test_case* unless slow tests are enabled (RUN_SLOW)."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Skip *test_case* unless local tests are enabled (RUN_LOCAL)."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Skip *test_case* unless packaged tests are enabled (RUN_PACKAGED)."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Skip *test_case* unless remote tests are enabled (RUN_REMOTE)."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def _A ( *lowercase__ ):
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(lowercase__ ) and name.startswith("""test""" ):
for decorator in decorators:
lowercase__ = decorator(lowercase__ )
setattr(cls , lowercase__ , lowercase__ )
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """Raised by the offline simulator when a request is made with no timeout.

    (Restored name: it is raised by the offline context manager below; the
    dump's base class `__UpperCAmelCase` was undefined.)
    """

    pass


class OfflineSimulationMode(Enum):
    """How the offline context manager should simulate a missing network."""

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the block.

    The original declared three identically-named parameters on the inner
    request shim (a SyntaxError) and referenced several undefined names;
    the reconstruction below restores them from their later uses.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the block inside a fresh temporary cwd, restoring the old cwd after.

    The original declared `*x, **x` with the same name (a SyntaxError) and
    chdir'd to an ambiguous variable; extra args are forwarded to
    tempfile.TemporaryDirectory.
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    """Assert that PyArrow allocations grew while the block ran."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that PyArrow allocations did not grow while the block ran."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """Return True if two NumPy bit generators are in an identical state.

    Compares ten draws from deep copies so neither input generator is
    advanced.  (The original declared both parameters with the same name —
    a SyntaxError.)
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    """Wrap *func* so 500/502 HTTP errors become pytest xfails.

    The original inspected `str(func)` instead of the raised error and passed
    the wrong argument to `decorator.decorator`.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # Server-side failures are not this test's fault; mark as xfail.
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    """Captured result of a finished subprocess (name restored from its use below)."""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    """Forward each line read from *stream* to *callback* until EOF."""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run *cmd*, teeing its stdout/stderr to the parent while capturing them."""
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # Streaming line-by-line avoids the documented deadlock risk of `wait`
    # with huge pipe contents, and surfaces output while the child runs.
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # (The original declared four identically-named params — a SyntaxError.)
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # Wrap in tasks: asyncio.wait() stopped accepting bare coroutines in 3.11.
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Synchronously run *cmd* via the async helpers and validate its output.

    Raises RuntimeError on a non-zero exit code or on a silent run.
    Uses asyncio.run() instead of the deprecated get_event_loop() pattern.
    """
    result = asyncio.run(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
def pytest_xdist_worker_id():
    """Return the numeric pytest-xdist worker id (0 when not distributed).

    PYTEST_XDIST_WORKER looks like "gw0", "gw1", ...; the prefix is stripped.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a torch.distributed port unique to this xdist worker."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 325 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    """2x nearest-neighbour upsampling followed by a 3x3 convolution.

    NOTE(review): attribute names restored from the `self.out_channels` /
    `self.dtype` / `self.conv` references in the body; `jnp.floataa` was an
    undefined name, corrected to `jnp.float32`; `setup` stored the conv in a
    throwaway local instead of `self.conv`.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # assumes NHWC layout — TODO confirm against callers.
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    """2x downsampling via a strided 3x3 convolution.

    NOTE(review): restored like FlaxUpsample2D — `setup` never assigned
    `self.conv` and the dtype default `jnp.floataa` was undefined.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    """ResNet block: two GroupNorm→swish→3x3-conv stages with a timestep
    embedding injected between them and an optional 1x1 shortcut.

    NOTE(review): the dump collapsed `norm1`/`norm2` and `conv1`/`conv2`
    into one name and assigned every layer to a local instead of `self`;
    names are restored from the `self.*` references in `__call__`.
    """

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Broadcast the projected timestep embedding over the spatial dims.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
| 325 | 1 |
"""Print all permutations of a sequence via backtracking."""
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of *sequence* (name restored from the call sites)."""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Recursively extend *current_sequence* with unused elements; print complete ones.

    `index_used[i]` marks whether sequence[i] is already in the current path,
    so each element appears exactly once per permutation.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # Backtrack: undo the choice before trying the next element.
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 709 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original mLUKE checkpoint into a Transformers LukeForMaskedLM.

    NOTE(review): the obfuscated dump assigned every intermediate to a single
    throwaway local while later lines referenced the real names (`metadata`,
    `state_dict`, `entity_vocab`, ...); those names are reconstructed from
    their uses below.
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Rewrite the tokenizer class so the saved folder loads as MLukeTokenizer.
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    # Prefix backbone weights with "luke." to match the HF module layout.
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face["luke." + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def __a(lowerCAmelCase__: str):
    """Load the original entity vocabulary (JSON-lines) into a name -> id dict.

    Each line of the file is a JSON object with an integer ``"id"`` and an
    ``"entities"`` list of ``[entity_name, language]`` pairs. Special tokens
    are mapped under their bare name; every other alias is mapped under
    ``"<language>:<entity_name>"``.

    Fixes vs. the original: each line is parsed with ``json.loads(line)``
    (previously the *path string* was parsed for every line), the file is
    closed via a context manager, and the mapping entries are actually stored
    in the returned dict instead of throwaway locals.
    """
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    with open(lowerCAmelCase__, encoding="utf-8") as f:
        data = [json.loads(line) for line in f]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                # Special tokens keep their bare name; remaining aliases of
                # this entry are irrelevant once the special name is mapped.
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    # Command-line entry point: convert an original (M)LUKE checkpoint into
    # the Hugging Face format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    # Fixed: the parser and the parsed namespace were previously bound to a
    # throwaway name while `parser.add_argument(...)` / `args.*` were read
    # from undefined names (NameError at runtime).
    args = parser.parse_args()
    # NOTE(review): assumes `convert_luke_checkpoint` is the conversion entry
    # point defined earlier in this file — confirm its (possibly mangled) name.
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 340 | 0 |
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def a_(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None) -> dict:
    """Assemble the keyword-argument dict fed to an M2M100 encoder/decoder model.

    Any mask not supplied is derived: attention masks mark non-padding
    positions (``!= config.pad_token_id``), head masks are all-ones (no head
    is pruned) and are created on the same device as ``input_ids``.

    Fixes vs. the original: all parameters shared one mangled name (a
    SyntaxError), ``decoder_attention_mask`` was computed but the *encoder*
    mask was returned in its slot, and the head masks targeted an undefined
    device name.
    """
    if attention_mask is None:
        # Attend to every non-padding input token.
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        # All-ones head masks keep every attention head active.
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=input_ids.device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=input_ids.device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=input_ids.device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class lowerCamelCase_ :
    """Test helper that fabricates a tiny M2M100 (``MaMaaa``) configuration and
    random encoder/decoder inputs for the model tests below.

    NOTE(review): this block appears machine-mangled — every ``__init__``
    parameter is named ``_a`` (duplicate parameter names are a SyntaxError)
    while the bodies read descriptive names (``parent``, ``batch_size``, ...)
    that are never bound, and locals are repeatedly rebound to
    ``__lowerCamelCase`` / referenced via undefined ``UpperCAmelCase__``.
    The comments below describe the evident intent; the code cannot run as
    written until the original identifiers are restored.
    """

    # Record the test-harness handle and all tiny-model hyperparameters.
    def __init__( self : Dict , _a : Tuple , _a : Any=13 , _a : List[str]=7 , _a : Dict=True , _a : List[Any]=False , _a : List[Any]=99 , _a : List[str]=16 , _a : Optional[int]=2 , _a : Union[str, Any]=4 , _a : Any=4 , _a : List[Any]="relu" , _a : List[str]=0.1 , _a : Optional[int]=0.1 , _a : Optional[int]=0.0 , _a : int=0.0 , _a : Any=20 , _a : str=2 , _a : Optional[Any]=1 , _a : Dict=0 , ) -> Union[str, Any]:
        __lowerCamelCase : Optional[int] = parent
        __lowerCamelCase : Tuple = batch_size
        __lowerCamelCase : Any = seq_length
        __lowerCamelCase : Union[str, Any] = is_training
        __lowerCamelCase : Dict = use_labels
        __lowerCamelCase : List[Any] = vocab_size
        __lowerCamelCase : List[str] = hidden_size
        __lowerCamelCase : Dict = num_hidden_layers
        __lowerCamelCase : Tuple = num_attention_heads
        __lowerCamelCase : Tuple = intermediate_size
        __lowerCamelCase : List[str] = hidden_act
        __lowerCamelCase : List[str] = hidden_dropout_prob
        __lowerCamelCase : Tuple = attention_probs_dropout_prob
        __lowerCamelCase : Optional[int] = encoder_layerdrop
        __lowerCamelCase : List[str] = decoder_layerdrop
        __lowerCamelCase : List[Any] = max_position_embeddings
        __lowerCamelCase : Dict = eos_token_id
        __lowerCamelCase : Optional[Any] = pad_token_id
        __lowerCamelCase : int = bos_token_id

    # Build random (input_ids, decoder_input_ids) and the model kwargs dict.
    def _lowercase ( self : Optional[int] ) -> Any:
        __lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCamelCase : Dict = self.eos_token_id # Eos Token
        __lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        __lowerCamelCase : List[str] = input_ids.clamp(self.pad_token_id + 1 )
        __lowerCamelCase : List[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
        __lowerCamelCase : Tuple = self.get_config()
        __lowerCamelCase : Tuple = prepare_mam_aaa_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        return config, inputs_dict

    # Tiny MaMaaaConfig mirroring the hyperparameters stored in __init__.
    def _lowercase ( self : int ) -> List[str]:
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )

    # Alias used by the common ModelTesterMixin machinery.
    def _lowercase ( self : Dict ) -> List[str]:
        __lowerCamelCase : int = self.prepare_config_and_inputs()
        return config, inputs_dict

    # Check that decoding with a KV cache matches decoding from scratch on
    # the newly appended tokens.
    def _lowercase ( self : Union[str, Any] , _a : List[Any] , _a : int ) -> Dict:
        __lowerCamelCase : Tuple = MaMaaaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
        __lowerCamelCase : List[str] = inputs_dict["""input_ids"""]
        __lowerCamelCase : List[Any] = inputs_dict["""attention_mask"""]
        __lowerCamelCase : Tuple = inputs_dict["""head_mask"""]
        # first forward pass
        __lowerCamelCase : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        __lowerCamelCase : Optional[Any] = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        __lowerCamelCase : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __lowerCamelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        __lowerCamelCase : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
        __lowerCamelCase : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        __lowerCamelCase : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["""last_hidden_state"""]
        __lowerCamelCase : List[str] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[
            """last_hidden_state"""
        ]
        # select random slice
        __lowerCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        __lowerCamelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
        __lowerCamelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-2 ) )

    # Check that a save/reload round-trip of the standalone encoder and
    # decoder reproduces the full model's hidden states.
    def _lowercase ( self : Union[str, Any] , _a : List[str] , _a : Optional[int] ) -> Optional[Any]:
        __lowerCamelCase : Tuple = MaMaaaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval()
        __lowerCamelCase : str = model(**UpperCAmelCase__ )
        __lowerCamelCase : List[str] = outputs.encoder_last_hidden_state
        __lowerCamelCase : int = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCamelCase : List[str] = model.get_encoder()
            encoder.save_pretrained(UpperCAmelCase__ )
            __lowerCamelCase : List[str] = MaMaaaEncoder.from_pretrained(UpperCAmelCase__ ).to(UpperCAmelCase__ )
        __lowerCamelCase : Tuple = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCamelCase : Union[str, Any] = model.get_decoder()
            decoder.save_pretrained(UpperCAmelCase__ )
            __lowerCamelCase : List[Any] = MaMaaaDecoder.from_pretrained(UpperCAmelCase__ ).to(UpperCAmelCase__ )
        __lowerCamelCase : Tuple = decoder(
            input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowerCamelCase_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
    """Common model/generation/pipeline test suite for the MaMaaa (M2M100)
    architecture, driven by the tester class above.

    NOTE(review): machine-mangled — the base classes reference an undefined
    ``snake_case_`` (ModelTesterMixin / GenerationTesterMixin /
    PipelineTesterMixin per the imports above), every class attribute is
    named ``a_`` and every method ``_lowercase`` (later definitions shadow
    earlier ones), several signatures repeat the parameter ``_a``
    (a SyntaxError), and bodies read names (``pipeline_test_casse_name``,
    ``MaMaaaModelTester``, ``UpperCAmelCase__``) that are never bound here.
    Comments record the evident intent of each member.
    """

    # Model classes under test / generative subset / pipeline mapping.
    a_ =(
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    a_ =(MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    a_ =(
        {
            '''conversational''': MaMaaaForConditionalGeneration,
            '''feature-extraction''': MaMaaaModel,
            '''summarization''': MaMaaaForConditionalGeneration,
            '''text2text-generation''': MaMaaaForConditionalGeneration,
            '''translation''': MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    a_ =True
    a_ =True
    a_ =False
    a_ =False

    # Skip translation pipeline tests: M2M100 needs src_lang/tgt_lang, which
    # the generic pipeline harness cannot provide.
    def _lowercase ( self : str , _a : List[Any] , _a : List[str] , _a : List[Any] , _a : int , _a : Union[str, Any] ) -> List[str]:
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    # setUp: instantiate the model tester and the config tester.
    def _lowercase ( self : int ) -> Optional[Any]:
        __lowerCamelCase : Optional[Any] = MaMaaaModelTester(self )
        __lowerCamelCase : int = ConfigTester(self , config_class=UpperCAmelCase__ )

    # Run the shared configuration sanity checks.
    def _lowercase ( self : List[Any] ) -> List[Any]:
        self.config_tester.run_common_tests()

    # Save/reload every model class and assert no weights go missing.
    def _lowercase ( self : Union[str, Any] ) -> int:
        __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            __lowerCamelCase : str = model_class(UpperCAmelCase__ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCAmelCase__ )
                __lowerCamelCase : Any = model_class.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertEqual(info['missing_keys'] , [] )

    # Delegate: KV-cache consistency check on the decoder.
    def _lowercase ( self : List[Any] ) -> Union[str, Any]:
        __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase__ )

    # Delegate: standalone encoder/decoder round-trip check.
    def _lowercase ( self : int ) -> Tuple:
        __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase__ )

    # Forward pass driven by inputs_embeds instead of input_ids.
    def _lowercase ( self : List[Any] ) -> Any:
        __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            __lowerCamelCase : Optional[int] = model_class(UpperCAmelCase__ )
            model.to(UpperCAmelCase__ )
            model.eval()
            __lowerCamelCase : Tuple = copy.deepcopy(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
            if not self.is_encoder_decoder:
                __lowerCamelCase : int = inputs["""input_ids"""]
                del inputs["input_ids"]
            else:
                __lowerCamelCase : int = inputs["""input_ids"""]
                __lowerCamelCase : Optional[int] = inputs.get('decoder_input_ids' , UpperCAmelCase__ )
                del inputs["input_ids"]
                inputs.pop('decoder_input_ids' , UpperCAmelCase__ )
            __lowerCamelCase : Union[str, Any] = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                __lowerCamelCase : Tuple = wte(UpperCAmelCase__ )
            else:
                __lowerCamelCase : List[Any] = wte(UpperCAmelCase__ )
                __lowerCamelCase : List[Any] = wte(UpperCAmelCase__ )
            with torch.no_grad():
                model(**UpperCAmelCase__ )[0]

    # Smoke-test generation under fp16 (halved weights on CUDA only).
    def _lowercase ( self : Dict ) -> Dict:
        __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        __lowerCamelCase : List[str] = input_dict["""input_ids"""]
        __lowerCamelCase : Optional[int] = input_ids.ne(1 ).to(UpperCAmelCase__ )
        __lowerCamelCase : Optional[int] = MaMaaaForConditionalGeneration(UpperCAmelCase__ ).eval().to(UpperCAmelCase__ )
        if torch_device == "cuda":
            model.half()
        model.generate(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
        model.generate(num_beams=4 , do_sample=UpperCAmelCase__ , early_stopping=UpperCAmelCase__ , num_return_sequences=3 )
def a_(_lowerCAmelCase):
    """Build a ``torch.long`` tensor of token ids on the global test device.

    Fixes vs. the original: the body referenced one undefined name for both
    the tensor data and the ``device`` argument; the data is this function's
    own argument and the device is ``torch_device`` (imported from
    ``transformers.testing_utils`` at the top of this file).
    """
    return torch.tensor(_lowerCAmelCase, dtype=torch.long, device=torch_device)
# Absolute tolerance intended for comparing model outputs against the
# hard-coded expected tensors in the integration tests below.
# NOTE(review): the tests reference their tolerance through a different
# (mangled) name, so this constant is not actually read as `_UpperCamelCase`.
_UpperCamelCase = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration tests running the real ``facebook/m2m100_418M``
    checkpoint: headless inference, LM-head inference, and French->English
    beam-search translation.

    NOTE(review): machine-mangled — all three test methods are named
    ``_lowercase`` (later definitions shadow earlier ones, so unittest would
    only ever see one), and bodies read names (``UpperCAmelCase__``,
    ``tokenizer``, ``dct``, ``hypotheses_batch``, ``generated``,
    ``expected_en``) that are never bound here. Comments record the evident
    intent.
    """

    # Lazily-constructed shared tokenizer for the checkpoint under test.
    @cached_property
    def _lowercase ( self : Tuple ) -> str:
        return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )

    # Base model inference: check output shape and a 3x3 logits corner
    # against hard-coded expected values.
    def _lowercase ( self : int ) -> str:
        __lowerCamelCase : List[Any] = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase__ )
        __lowerCamelCase : int = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
        __lowerCamelCase : int = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
        __lowerCamelCase : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ )
        with torch.no_grad():
            __lowerCamelCase : Union[str, Any] = model(**UpperCAmelCase__ )[0]
        __lowerCamelCase : Optional[int] = torch.Size((1, 11, 1024) )
        self.assertEqual(output.shape , UpperCAmelCase__ )
        # change to expected output here
        __lowerCamelCase : Union[str, Any] = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=UpperCAmelCase__ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )

    # LM-head inference: same check, against the vocabulary-sized logits.
    def _lowercase ( self : Union[str, Any] ) -> Any:
        __lowerCamelCase : List[Any] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase__ )
        # change to intended input
        __lowerCamelCase : List[Any] = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
        __lowerCamelCase : List[Any] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
        __lowerCamelCase : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ )
        with torch.no_grad():
            __lowerCamelCase : List[Any] = model(**UpperCAmelCase__ )[0]
        __lowerCamelCase : Tuple = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , UpperCAmelCase__ )
        # change to expected output here
        __lowerCamelCase : Dict = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=UpperCAmelCase__ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )

    # End-to-end fr->en translation with beam search; compares decoded text
    # against reference translations.
    def _lowercase ( self : Optional[Any] ) -> List[Any]:
        __lowerCamelCase : Optional[int] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase__ )
        __lowerCamelCase : str = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
        __lowerCamelCase : List[str] = [
            """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
            """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
            """Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
            """ Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
            """ l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        __lowerCamelCase : Optional[int] = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='pt' )
        __lowerCamelCase : Dict = model.generate(
            input_ids=dct['input_ids'].to(UpperCAmelCase__ ) , attention_mask=dct['attention_mask'].to(UpperCAmelCase__ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
        __lowerCamelCase : Optional[int] = [
            """The NSA case highlights the total absence of intelligence debate""",
            """I think there are two levels of response from the French government.""",
            """When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
            """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
            """ communications in France.""",
        ]
        __lowerCamelCase : str = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
        assert generated == expected_en
| 459 |
"""simple docstring"""
from PIL import Image
def lowerCAmelCase__(img, level: float):
    """Return a copy of ``img`` with its brightness shifted by ``level``.

    Args:
        img: a PIL ``Image`` (anything exposing ``.point``).
        level: brightness shift in [-255.0, 255.0]; positive brightens.

    Raises:
        ValueError: if ``level`` is outside [-255.0, 255.0].

    Fixes vs. the original: both parameters shared one mangled name (duplicate
    parameter names are a SyntaxError) while the body read ``level`` and ``c``
    that were never bound.
    """

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) == c + level; kept in the midpoint form to
        # mirror the companion contrast routine.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Brighten by 100 and save the result. Fixes vs. the original: the
        # result was bound to a throwaway name while `.save` was called on an
        # undefined `brigt_img`, and the callee name did not match the
        # brightness function defined above.
        brigt_img = lowerCAmelCase__(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 389 | 0 |
from PIL import Image
def __lowerCAmelCase(img, level: int):
    """Return a copy of ``img`` with its contrast adjusted by ``level``.

    Args:
        img: a PIL ``Image`` (anything exposing ``.point``).
        level: contrast delta; 0 leaves the image unchanged, positive values
            increase contrast, negative values decrease it.

    Fixes vs. the original: both parameters were named ``A`` (duplicate
    parameter names are a SyntaxError), the body read ``factor``/``c``/``img``
    that were never bound, and ``img.point`` was handed a parameter instead of
    the mapping function.
    """
    # Standard linear contrast-correction factor.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Scale each channel value away from (level > 0) or toward (level < 0)
        # the midpoint 128.
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Raise the contrast (level 170) and save the result. Fixes vs. the
        # original: the result was bound to a throwaway name while `.save`
        # was called on an undefined `cont_img`, and the callee name did not
        # match the contrast function defined above.
        cont_img = __lowerCAmelCase(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_a: Optional[Any] = logging.get_logger(__name__)
def __lowerCAmelCase(A):
    """Coerce video input into the canonical batched form.

    Returns a list of videos, each a list of frames:
      * a batch (list/tuple of lists of frames) is returned unchanged,
      * a single video (list/tuple of frames) is wrapped once,
      * a single frame (valid image) is wrapped twice.

    Raises:
        ValueError: if ``A`` matches none of the accepted layouts.

    Fixes vs. the original: the body tested and returned an undefined name
    (``videos``) instead of the parameter, so every call raised NameError.
    """
    if isinstance(A, (list, tuple)) and isinstance(A[0], (list, tuple)) and is_valid_image(A[0][0]):
        return A
    elif isinstance(A, (list, tuple)) and is_valid_image(A[0]):
        return [A]
    elif is_valid_image(A):
        return [[A]]
    raise ValueError(f"Could not make batched video from {A}")
class __UpperCamelCase ( lowercase ):
    """Video image processor: resizes, center-crops, rescales (optionally with
    a half-range offset) and normalizes each frame of each video, returning a
    ``BatchFeature`` with ``pixel_values``.

    NOTE(review): this block looks machine-mangled — every method parameter
    is named ``lowerCAmelCase`` (duplicate parameter names are a SyntaxError),
    every local rebinds ``UpperCAmelCase_`` while bodies read descriptive
    names (``size``, ``do_resize``, ...) that are never bound, and the five
    helper methods all share the name ``__A`` so later definitions shadow
    earlier ones. The docstrings below record the evident intent; original
    identifiers must be restored before this can run.
    """

    # Keys this processor produces in its BatchFeature output.
    SCREAMING_SNAKE_CASE__ = ['pixel_values']

    def __init__( self : int , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 255 , lowerCAmelCase : bool = True , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , **lowerCAmelCase : List[Any] , ):
        """Store default resize/crop/rescale/normalize settings (shortest edge
        256, crop 224x224, scale 1/255, ImageNet mean/std)."""
        super().__init__(**lowerCAmelCase )
        UpperCAmelCase_ = size if size is not None else {"shortest_edge": 256}
        UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
        UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
        UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
        UpperCAmelCase_ = do_resize
        UpperCAmelCase_ = size
        UpperCAmelCase_ = do_center_crop
        UpperCAmelCase_ = crop_size
        UpperCAmelCase_ = resample
        UpperCAmelCase_ = do_rescale
        UpperCAmelCase_ = rescale_factor
        UpperCAmelCase_ = offset
        UpperCAmelCase_ = do_normalize
        UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def __A ( self : List[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : List[str] , ):
        """Resize one frame, either to a shortest edge or to exact
        height/width; any other size spec raises ValueError."""
        UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
        if "shortest_edge" in size:
            UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
        elif "height" in size and "width" in size:
            UpperCAmelCase_ = (size["height"], size["width"])
        else:
            raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )

    def __A ( self : List[str] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Dict , ):
        """Center-crop one frame to exactly size['height'] x size['width']."""
        UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )

    def __A ( self : List[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : List[str] , ):
        """Rescale pixel values, optionally shifting first so the output is
        centered around zero.

        NOTE(review): `np.floataa` looks like a mangled dtype name (likely
        `np.float32`), and subtracting `scale / 2` *before* rescaling differs
        from the common "subtract 0.5 after rescaling" offset — confirm the
        intended order against the reference implementation."""
        UpperCAmelCase_ = image.astype(np.floataa )
        if offset:
            UpperCAmelCase_ = image - (scale / 2)
        return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )

    def __A ( self : Tuple , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : int , ):
        """Normalize one frame with the given per-channel mean and std."""
        return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )

    def __A ( self : Union[str, Any] , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        """Validate flags and run the full transform pipeline on ONE frame:
        resize -> center-crop -> rescale -> normalize -> channel layout."""
        # NOTE(review): `and` binds tighter than `or`, so this raises whenever
        # `resample is None`, even with `do_resize` False — likely meant
        # `do_resize and (size is None or resample is None)`.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
        if do_resize:
            UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
        if do_center_crop:
            UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
        if do_rescale:
            UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase , offset=lowerCAmelCase )
        if do_normalize:
            UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
        UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
        return image

    def __A ( self : Optional[int] , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase : Dict , ):
        """Public entry point: fill unset options from the instance defaults,
        validate the input, batch it into videos-of-frames, preprocess every
        frame, and wrap the result in a BatchFeature."""
        UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase_ = resample if resample is not None else self.resample
        UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase_ = offset if offset is not None else self.offset
        UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase_ = image_std if image_std is not None else self.image_std
        UpperCAmelCase_ = size if size is not None else self.size
        UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
        UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
        UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
        if not valid_images(lowerCAmelCase ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        UpperCAmelCase_ = make_batched(lowerCAmelCase )
        UpperCAmelCase_ = [
            [
                self._preprocess_image(
                    image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , offset=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
                for img in video
            ]
            for video in videos
        ]
        UpperCAmelCase_ = {"pixel_values": videos}
        return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[Any] = """conditional_detr"""
_UpperCamelCase : Any = ["""past_key_values"""]
_UpperCamelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , snake_case=True , snake_case=None , snake_case=3 , snake_case=300 , snake_case=6 , snake_case=2048 , snake_case=8 , snake_case=6 , snake_case=2048 , snake_case=8 , snake_case=0.0 , snake_case=0.0 , snake_case=True , snake_case="relu" , snake_case=256 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1.0 , snake_case=False , snake_case="sine" , snake_case="resnet50" , snake_case=True , snake_case=False , snake_case=2 , snake_case=5 , snake_case=2 , snake_case=1 , snake_case=1 , snake_case=2 , snake_case=5 , snake_case=2 , snake_case=0.25 , **snake_case , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowercase = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(snake_case , snake_case ):
lowercase = backbone_config.get('model_type' )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(snake_case )
lowercase = use_timm_backbone
lowercase = backbone_config
lowercase = num_channels
lowercase = num_queries
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = init_xavier_std
lowercase = encoder_layerdrop
lowercase = decoder_layerdrop
lowercase = encoder_layers
lowercase = auxiliary_loss
lowercase = position_embedding_type
lowercase = backbone
lowercase = use_pretrained_backbone
lowercase = dilation
# Hungarian matcher
lowercase = class_cost
lowercase = bbox_cost
lowercase = giou_cost
# Loss coefficients
lowercase = mask_loss_coefficient
lowercase = dice_loss_coefficient
lowercase = cls_loss_coefficient
lowercase = bbox_loss_coefficient
lowercase = giou_loss_coefficient
lowercase = focal_alpha
super().__init__(is_encoder_decoder=snake_case , **snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.d_model
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[str] = version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 12
| 84 |
import torch
from torch import nn
class A_(nn.Module):
    """Projected adaptive log-softmax head (Transformer-XL style).

    The vocabulary is split by `cutoffs` into a frequent "head" shortlist plus
    rarer tail clusters; a tail token's log-probability is the head's cluster
    log-probability plus its in-cluster log-probability.

    NOTE(review): reconstructed from the obfuscated original, whose three
    methods shared one name while the bodies call ``self._compute_logit``, and
    whose parameter lists reused a single identifier (a SyntaxError). ``forward``
    is required by nn.Module's ``__call__``.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        # Append the full vocab size so self.cutoffs brackets every token id.
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # Head emits one logit per shortlist token plus one per tail cluster.
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    # No projection needed when the dimensions already match.
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            # Shrink embedding width geometrically for rarer clusters.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Linear logits, optionally through the low-rank projection `proj`."""
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """With `labels`: per-position negative log-likelihoods (1-D tensor).

        Without `labels`: full log-probabilities of shape (N, n_token).
        Raises RuntimeError when hidden/labels batch sizes disagree after the
        next-token shift.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            # Degenerate case: plain (projected) softmax over the whole vocab.
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases per cluster
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    # Head additionally scores the tail clusters.
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    # Restrict to positions whose target falls in this cluster.
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        out[:, l_idx:r_idx] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        # Scatter back to the original token order.
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        """Full-vocabulary log-probabilities for 2-D `hidden`, shape (N, n_token)."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases (same layout as in forward)
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    # Same cluster index as forward, so both paths agree.
                    cluster_prob_idx = self.cutoffs[0] + i - 1
                    out[:, start_idx:stop_idx] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
            return out
| 84 | 1 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
# The dataclass below logs through the name `logger` (TPU branch); the
# obfuscated original bound the logger only as `snake_case`.
logger = logging.get_logger(__name__)
snake_case = logger  # backward-compat alias for the obfuscated binding
def UpperCAmelCase_(default=None, metadata=None):
    """`dataclasses.field` helper for fields whose default is a list value.

    The obfuscated original declared two parameters with the same name (a
    SyntaxError) and referenced an undefined `_lowercase`; the canonical
    (default, metadata) signature is restored — it matches the keyword
    arguments used by the dataclass below.
    """
    return field(default_factory=lambda: default, metadata=metadata)


# The dataclass below calls this helper by its canonical name.
list_field = UpperCAmelCase_
@dataclass
class lowerCAmelCase:
    """Configuration for the (deprecated) HF benchmarking utilities.

    NOTE(review): the obfuscated original named every field `A_` (collapsing
    the dataclass to a single field) and every method `_A`; field and method
    names plus True/False defaults are restored from the upstream
    transformers `BenchmarkArguments` — confirm against that file.
    """

    models: List[str] = field(
        default_factory=list,
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = field(
        default_factory=lambda: [8],
        metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"},
    )
    sequence_lengths: List[int] = field(
        default_factory=lambda: [8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    # The timestamped defaults below are evaluated once, at class-definition time.
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        """Emit the deprecation notice as soon as an instance is created."""
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serialize this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        """The configured model identifiers; raises ValueError when empty."""
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        """Whether measurements should fan out to subprocesses (never on TPU)."""
        if not self.multi_process:
            return False
        elif self.is_tpu:  # NOTE(review): `is_tpu` is defined by subclasses — confirm
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The obfuscated original collapsed six distinct module constants into
# reassignments of one name (`snake_case`), leaving every name the tokenizer
# class below references (RemBertTokenizer, logger, VOCAB_FILES_NAMES, ...)
# undefined. Canonical names restored.
if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
snake_case = SPIECE_UNDERLINE  # final value of the obfuscated binding, kept for compatibility
class lowerCAmelCase(UpperCamelCase_):
    """Fast RemBERT tokenizer (sentencepiece-backed).

    NOTE(review): the obfuscated original named all four class attributes
    `A_`, all four methods `_A`, and every parameter `a__` (a SyntaxError);
    the canonical PreTrainedTokenizerFast names — which the framework calls —
    are restored from the upstream RemBERT tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word: it absorbs the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # NOTE(review): attribute name assumed from the sibling fast tokenizers'
        # convention — confirm.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """`[CLS] A [SEP]` for one sequence, `[CLS] A [SEP] B [SEP]` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into `save_directory`; returns the path tuple."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 568 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _snake_case(unittest.TestCase):
    """Fast AltDiffusion img2img pipeline tests built from tiny dummy components.

    NOTE(review): in the obfuscated original every property/method shared one
    name (shadowing each other) and the inner `extract` declared *args/**kwargs
    with the same identifier (a SyntaxError). Property names are restored from
    the `self.dummy_*` references inside the test bodies.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        # Deterministic (fixed rng) 1x3x32x32 image tensor.
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        # Stand-in feature extractor returning an empty pixel_values tensor.
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        # NOTE(review): assignment target of the bare `77` was destroyed by the
        # obfuscation; restored as the tokenizer length cap — confirm upstream.
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class _snake_case(unittest.TestCase):
    """Slow AltDiffusion img2img integration test against the BAAI checkpoint.

    NOTE(review): both methods of the obfuscated original shared one name, so
    `tearDown` was shadowed and never invoked by unittest; canonical names
    restored (grounded by the `super().tearDown()` call).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
def infix_2_postfix(infix: str) -> str:
    """Convert an infix expression string to postfix, printing a trace table.

    Names are restored from the internal references (`infix_2_postfix` is
    called below, `print_width` is used in the prints); additionally, an
    operator that follows an unmatched "(" no longer raises KeyError on the
    priority table.
    """
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # While priority of x is not > priority of the stack top; stop
                # at "(" so a pending parenthesis never hits the priority table
                # (the original crashed with KeyError on e.g. "(a+b)*c").
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    """Convert infix to prefix via the reverse / swap-parentheses trick."""
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


# Backward-compat: the obfuscated module exposed the last definition under this name.
SCREAMING_SNAKE_CASE__ = infix_2_prefix
if __name__ == "__main__":
    # Interactive demo: read an infix expression, strip spaces, print its prefix form.
    # (The obfuscated original bound the input to `__A` while using `Infix` — NameError.)
    Infix = input('''\nEnter an Infix Equation = ''')  # Input an Infix equation
    Infix = ''''''.join(Infix.split())  # Remove spaces from the input
    print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 343 | 0 |
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> "qiskit.result.counts.Counts":
    """Simulate a quantum full adder on (input_1, input_2, carry_in).

    Each input may be 0, 1 or 2 — a 2 puts the corresponding qubit into
    superposition via a Hadamard gate. Returns the measurement counts of the
    (sum, carry) qubits over 1000 shots.

    NOTE(review): the obfuscated original declared all three parameters with
    one name (a SyntaxError) and collapsed the type check into
    `isinstance(x, x)`; the function name matches the `quantum_full_adder(1, 1, 1)`
    call in the __main__ guard.
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)


# Backward-compat alias for the obfuscated name.
SCREAMING_SNAKE_CASE = quantum_full_adder
if __name__ == "__main__":
    # Demo: simulate the adder with all three inputs set to 1.
    print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 177 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import scaffolding for the DPT model package. The obfuscated original
# bound every structure to a throwaway `_lowerCamelCase`, so `_import_structure`
# (used by _LazyModule below) was never defined and the optional submodule
# entries were discarded; the constructed lazy module was also never installed
# in sys.modules.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 177 | 1 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """yaml.SafeLoader that rejects mappings containing duplicate keys.

    The class name is grounded by the ``Loader=_NoDuplicateSafeLoader`` usage
    below; ``construct_mapping`` must keep this exact name to override the
    SafeLoader hook (the obfuscated original gave both methods one name).
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; normalize them to tuples before counting.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def lowerCamelCase ( lowerCamelCase : str):
A_ : str = list(readme_content.splitlines())
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
A_ : int = full_content[1:].index("""---""") + 1
A_ : str = """\n""".join(full_content[1:sep_idx])
return yamlblock, "\n".join(full_content[sep_idx + 1 :])
return None, "\n".join(lowerCamelCase)
class DatasetMetadata(dict):
    """Dict-like wrapper around the YAML metadata block of a dataset README.

    NOTE(review): the `dict` base is grounded by `self.items()` and
    `cls(**metadata_dict)` below (the obfuscated base name was destroyed);
    method names are restored from the internal `cls.from_yaml_string` /
    `self._to_readme` / `self.to_yaml_string` call sites.
    """

    # Keys whose YAML spelling uses dashes instead of underscores.
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from the YAML front matter of the README at `path`."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write this metadata as front matter, preserving any existing body."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        """Render ``---`` front matter followed by the (optional) README body."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Dump to YAML, restoring dashed spellings for the flagged keys."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# Backward-compat alias for the obfuscated name.
__lowerCAmelCase = DatasetMetadata
# Names of the task templates / task categories recognized in dataset metadata;
# the lists are empty placeholders.
# NOTE(review): presumably consumed by the metadata validation logic — confirm
# against the code that reads this mapping.
__magic_name__ = {
    'image-classification': [],
    'translation': [],
    'image-segmentation': [],
    'fill-mask': [],
    'automatic-speech-recognition': [],
    'token-classification': [],
    'sentence-similarity': [],
    'audio-classification': [],
    'question-answering': [],
    'summarization': [],
    'zero-shot-classification': [],
    'table-to-text': [],
    'feature-extraction': [],
    'other': [],
    'multiple-choice': [],
    'text-classification': [],
    'text-to-image': [],
    'text2text-generation': [],
    'zero-shot-image-classification': [],
    'tabular-classification': [],
    'tabular-regression': [],
    'image-to-image': [],
    'tabular-to-text': [],
    'unconditional-image-generation': [],
    'text-retrieval': [],
    'text-to-speech': [],
    'object-detection': [],
    'audio-to-audio': [],
    'text-generation': [],
    'conversational': [],
    'table-question-answering': [],
    'visual-question-answering': [],
    'image-to-text': [],
    'reinforcement-learning': [],
    'voice-activity-detection': [],
    'time-series-forecasting': [],
    'document-question-answering': [],
}
if __name__ == "__main__":
    # CLI entry: validate (and round-trip) the YAML metadata block of a README.md.
    from argparse import ArgumentParser

    # NOTE(review): the assignments below all target `__magic_name__` while the
    # following statements read `ap`, `args`, `readme_filepath` and
    # `dataset_metadata` — names never bound here (obfuscation damage).
    __magic_name__ = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    __magic_name__ = ap.parse_args()
    __magic_name__ = Path(args.readme_filepath)
    __magic_name__ = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point for the ``accelerate`` command-line tool.

    Builds the top-level argument parser, registers every sub-command, parses
    ``sys.argv`` and dispatches to the selected sub-command's handler.

    NOTE(review): renamed to ``main`` to match the ``if __name__`` guard below;
    the obfuscated original passed the function object itself as
    ``allow_abbrev``, as the ``subparsers`` argument and as ``args``.
    """
    # allow_abbrev=False: don't let argparse accept unambiguous option prefixes.
    parser = ArgumentParser("""Accelerate CLI tool""", usage="""accelerate <command> [<args>]""", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="""accelerate command helpers""")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, """func"""):
        # No sub-command selected: print usage and exit with an error code.
        parser.print_help()
        exit(1)

    # Run the selected sub-command's handler.
    args.func(args)
if __name__ == "__main__":
    # Script entry point for the accelerate CLI; expects `main` defined above.
    main()
| 665 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
def _lowerCAmelCase ( __snake_case : Any ) -> List[List[ImageInput]]:
    """Coerce the input into a batch of videos: a list of lists of frames.

    Accepts a single image, one video (a list/tuple of frames) or a batch of
    videos and always returns the doubly-nested form.

    NOTE(review): the obfuscated original read undefined names (``_A``,
    ``videos``); only the undefined reads were repaired, the branching is kept.
    """
    videos = __snake_case
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # Already a batch of videos.
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # A single video: wrap it into a batch of one.
        return [videos]
    elif is_valid_image(videos):
        # A single frame: wrap it into a one-frame video inside a batch.
        return [[videos]]
    raise ValueError(f'Could not make batched video from {videos}')
class SCREAMING_SNAKE_CASE (_UpperCAmelCase ):
    """Video image processor: resizes, center-crops, rescales and normalizes frames.

    NOTE(review): this block is obfuscation-damaged and left byte-for-byte —
    the base-class name is undefined here, every method reuses a single
    parameter name (a SyntaxError for the multi-parameter signatures), and the
    bodies read `__UpperCamelCase`, which is never bound. Only comments and
    docstrings were added.
    """

    # Keys of the model inputs this processor produces.
    lowerCAmelCase = ['pixel_values']

    def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
        """Store default preprocessing flags and sizes (resize / crop / rescale / normalize)."""
        super().__init__(**__UpperCamelCase)
        # Defaults: shortest-edge-224 resize and 224x224 center crop.
        __A : int = size if size is not None else {'shortest_edge': 224}
        __A : Any = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase)
        __A : Union[str, Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        __A : int = get_size_dict(__UpperCamelCase , param_name='crop_size')
        __A : Optional[Any] = do_resize
        __A : List[Any] = size
        __A : Dict = do_center_crop
        __A : Optional[Any] = crop_size
        __A : Any = resample
        __A : int = do_rescale
        __A : int = rescale_factor
        __A : int = do_normalize
        __A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __A : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ):
        """Resize one frame; `size` is either `{'shortest_edge': n}` or `{'height': h, 'width': w}`."""
        __A : Tuple = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase)
        if "shortest_edge" in size:
            # Preserve aspect ratio, matching the shortest edge to the target.
            __A : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size['shortest_edge'] , default_to_square=__UpperCamelCase)
        elif "height" in size and "width" in size:
            __A : List[Any] = (size['height'], size['width'])
        else:
            raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
        return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase)

    def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
        """Center-crop one frame to `size['height']` x `size['width']`."""
        __A : List[Any] = get_size_dict(__UpperCamelCase)
        if "height" not in size or "width" not in size:
            raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
        return center_crop(__UpperCamelCase , size=(size['height'], size['width']) , data_format=__UpperCamelCase , **__UpperCamelCase)

    def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase)

    def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
        """Normalize one frame with per-channel `mean` and `std`."""
        return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase)

    def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , ):
        """Apply the configured transform chain (resize -> crop -> rescale -> normalize) to one frame."""
        # Validate that every enabled step has its required configuration.
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        __A : Optional[Any] = to_numpy_array(__UpperCamelCase)
        if do_resize:
            __A : Tuple = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase)
        if do_center_crop:
            __A : List[str] = self.center_crop(__UpperCamelCase , size=__UpperCamelCase)
        if do_rescale:
            __A : int = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase)
        if do_normalize:
            __A : int = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase)
        __A : List[Any] = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase)
        return image

    def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
        """Preprocess a batch of videos; per-call arguments override the stored defaults."""
        # Fall back to the defaults captured in __init__ for anything unspecified.
        __A : str = do_resize if do_resize is not None else self.do_resize
        __A : Optional[int] = resample if resample is not None else self.resample
        __A : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
        __A : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
        __A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        __A : str = do_normalize if do_normalize is not None else self.do_normalize
        __A : List[str] = image_mean if image_mean is not None else self.image_mean
        __A : Optional[int] = image_std if image_std is not None else self.image_std
        __A : Any = size if size is not None else self.size
        __A : str = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase)
        __A : int = crop_size if crop_size is not None else self.crop_size
        __A : Optional[Any] = get_size_dict(__UpperCamelCase , param_name='crop_size')

        if not valid_images(__UpperCamelCase):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        # Normalize the input into a batch of videos (lists of frames).
        __A : Optional[int] = make_batched(__UpperCamelCase)

        __A : Optional[Any] = [
            [
                self._preprocess_image(
                    image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
                for img in video
            ]
            for video in videos
        ]

        __A : Any = {'pixel_values': videos}
        return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase)
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER over *result* and write them (plus, optionally, every
    prediction/target pair) to disk.

    NOTE(review): renamed to ``log_results`` to match the call site in ``main``;
    the obfuscated original duplicated its parameter name (a SyntaxError) while
    the body read the intended names ``result`` and ``args``.
    """
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)

    with open(f'{dataset_id}_eval_results.txt', 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'{i}' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'{i}' + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lowercase *text*, strip ignored punctuation and normalize whitespace.

    Must mirror the normalization applied to training transcriptions.
    NOTE(review): renamed to ``normalize_text`` to match its call site in
    ``map_to_pred``; the obfuscated original used the *input text* as the regex
    pattern and split on the wrong value.
    """
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', ' ', ' ']

    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text
def main(args):
    """Evaluate the ASR model named by ``args.model_id`` on the requested
    dataset split and log WER/CER results.

    NOTE(review): renamed to ``main`` to match the ``__main__`` guard below; the
    obfuscated original collapsed every assignment target while the reads kept
    the intended names (``dataset``, ``asr``, ``prediction``, ``batch``).
    """
    # load dataset
    # NOTE(review): use_auth_token=True restored from the upstream eval script —
    # the obfuscated original passed the function's own parameter here; confirm.
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        # Default to the first GPU when available, else CPU (-1).
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    # CLI: evaluate a speech-recognition model on a dataset split.
    # NOTE(review): the parser is bound to `lowercase__` but used as `parser`,
    # and the parsed result is read as `args` — names never bound here
    # (obfuscation damage); left byte-for-byte.
    lowercase__ : List[Any] = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
    )
    parser.add_argument(
        '''--dataset''',
        type=str,
        required=True,
        help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
    )
    parser.add_argument(
        '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
    )
    parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
    parser.add_argument(
        '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
    )
    parser.add_argument(
        '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
    )
    parser.add_argument(
        '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
    )
    parser.add_argument(
        '''--device''',
        type=int,
        default=None,
        help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
    )
    lowercase__ : Optional[Any] = parser.parse_args()

    main(args)
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

try:
    # The real encoders/pipeline need both `transformers` and `torch`.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy objects that raise a helpful error when used.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )

try:
    # MIDI utilities additionally require the `note_seq` package.
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
__lowerCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
__lowerCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> 
print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    """Fraction of positions where *preds* equals *labels* (element-wise).

    NOTE(review): renamed to ``simple_accuracy`` to match the call sites below;
    the obfuscated original duplicated its parameter name (a SyntaxError) while
    the body read ``preds``/``labels``.
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels, fa_avg="binary"):
    """Return accuracy and F1 for *preds* vs *labels*.

    NOTE(review): renamed to ``acc_and_fa`` to match the keyword call
    ``acc_and_fa(..., fa_avg="macro")`` in the metric class below; the
    obfuscated original duplicated its parameter names (a SyntaxError).
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc(ids_preds, labels):
    """MultiRC scoring: exact match, per-question macro-F1 (f1_m) and overall F1 (f1_a).

    NOTE(review): renamed to ``evaluate_multirc`` to match the call site in the
    metric class below; the obfuscated original duplicated its parameter name
    (a SyntaxError) and collapsed every assignment target.
    """
    # Group (prediction, label) pairs by their paragraph/question identity.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
        pred = id_pred["""prediction"""]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        # Per-question macro F1.
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average="""macro""")
        fas.append(fa)
        # Exact match: every answer for this question predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    # Overall F1 across all answers, ignoring question grouping.
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred["""prediction"""] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
    """SuperGLUE metric: dispatches per-subset to accuracy, F1, Matthews
    correlation, ReCoRD or MultiRC scoring.

    NOTE(review): obfuscation-damaged and left byte-for-byte — the decorator
    reads `_DESCRIPTION`/`_KWARGS_DESCRIPTION` (bound above under a different
    name), all three methods share the name `lowercase`, and the last one
    duplicates a parameter name (a SyntaxError). Only comments/docstrings added.
    """

    def lowercase (self ) -> int:
        """Return the MetricInfo; rejects unknown configuration names."""
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
        # record/multirc take nested dict features, so no numpy format for them.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )

    def lowercase (self ) -> Dict:
        """Return the per-subset feature schema for predictions/references."""
        if self.config_name == "record":
            # ReCoRD: indexed free-text answers.
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("""int64""" ),
                        "query": datasets.Value("""int64""" ),
                    },
                    "prediction_text": datasets.Value("""string""" ),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("""int64""" ),
                        "query": datasets.Value("""int64""" ),
                    },
                    "answers": datasets.Sequence(datasets.Value("""string""" ) ),
                },
            }
        elif self.config_name == "multirc":
            # MultiRC: indexed binary answers per paragraph/question pair.
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("""int64""" ),
                        "paragraph": datasets.Value("""int64""" ),
                        "question": datasets.Value("""int64""" ),
                    },
                    "prediction": datasets.Value("""int64""" ),
                },
                "references": datasets.Value("""int64""" ),
            }
        else:
            # All remaining subsets: plain label lists.
            return {
                "predictions": datasets.Value("""int64""" ),
                "references": datasets.Value("""int64""" ),
            }

    def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
        """Dispatch to the scoring function appropriate for this subset."""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(UpperCAmelCase , UpperCAmelCase )}
        elif self.config_name == "cb":
            return acc_and_fa(UpperCAmelCase , UpperCAmelCase , fa_avg="""macro""" )
        elif self.config_name == "record":
            # Reshape references/predictions into the record_evaluation format.
            _snake_case = [
                {
                    """qas""": [
                        {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
                        for ref in references
                    ]
                }
            ]
            _snake_case = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
            return evaluate_record(UpperCAmelCase , UpperCAmelCase )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(UpperCAmelCase , UpperCAmelCase )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-wide logger for this configuration file.
snake_case_ : List[str] = logging.get_logger(__name__)
# Map of pretrained XLM checkpoint name -> URL of its config.json.
# NOTE(review): this rebinds the same name as the logger above, clobbering it;
# the two constants were presumably meant to have distinct names — confirm.
snake_case_ : Any = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class __snake_case ( PretrainedConfig ):
    """Configuration class for XLM models; stores the model hyper-parameters.

    NOTE(review): the obfuscated original inherited an undefined name and gave
    all 33 ``__init__`` parameters one shared name (a SyntaxError). The base is
    restored to ``PretrainedConfig`` (the only config base this module imports)
    and the parameter/attribute names are restored from what the body reads.
    """

    model_type = """xlm"""
    # Map standard config attribute names onto XLM's historical names.
    attribute_map = {
        """hidden_size""": """emb_dim""",
        """num_attention_heads""": """n_heads""",
        """num_hidden_layers""": """n_layers""",
        """n_words""": """vocab_size""",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Store the hyper-parameters; unconsumed kwargs go to ``PretrainedConfig``."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            # Legacy alias for vocab_size (see attribute_map above).
            self.n_words = kwargs['''n_words''']

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class __snake_case ( OnnxConfig ):
    """ONNX export configuration for XLM models.

    NOTE(review): the obfuscated original inherited an undefined name; the base
    is restored to the imported ``OnnxConfig`` and the property renamed to
    ``inputs``, the abstract property ``OnnxConfig`` subclasses must implement.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axes mapping for the model's ONNX inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ])
| 169 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True iff *number* is a perfect square.

    NOTE(review): renamed to ``is_sq`` to match the call sites in ``solution``;
    the obfuscated original read names it never assigned.
    """
    # Candidate root via float sqrt; exactness is confirmed by the multiply-back.
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the fully reduced (numerator, denominator) of x + y + z, where each
    operand is the fraction num/den.

    NOTE(review): renamed to ``add_three`` to match the call sites in
    ``solution``; the obfuscated original gave all six parameters one shared
    name (a SyntaxError).
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """Collect every reduced sum x + y + z where z is derived from x and y via
    the four relations (n = 1, 2, -1, -2) and all fractions have denominators
    at most *order*; return numerator + denominator of the total.

    NOTE(review): renamed to ``solution`` to match the ``__main__`` guard; the
    obfuscated original collapsed every assignment target while the reads kept
    the intended names (``unique_s``, ``z_num``, ``hcf``, ``total``, ...),
    from which this body is reconstructed.
    """
    unique_s: set = set()
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1: z = x + y
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2: z**2 = x**2 + y**2, kept only when both sides are squares
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1: 1/z = 1/x + 1/y
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2: 1/z**2 = 1/x**2 + 1/y**2, both sides must be squares
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
    # Print the puzzle answer; relies on `solution` being defined above.
    print(f"{solution() = }")
| 169 | 1 |
"""simple docstring"""
def UpperCAmelCase(equationa: list[int], equationa_: list[int]) -> tuple[float, float]:
    """Solve a system of two linear equations via Cramer's rule.

    Each equation is given as ``[a, b, c]`` meaning ``a*x + b*y = c``.

    The original definition declared the parameter ``A`` twice (a
    SyntaxError) while the body read ``equationa``; reconstructed with
    distinct parameter names.

    :param equationa: coefficients ``[a1, b1, c1]`` of the first equation
    :param equationa_: coefficients ``[a2, b2, c2]`` of the second equation
    :return: the solution ``(x, y)``; ``(0.0, 0.0)`` for the trivial solution
    :raises ValueError: if an equation does not have exactly 3 coefficients,
        if both ``a`` and ``b`` of both equations are zero, or if the system
        has no solution / infinitely many solutions
    """
    if not (len(equationa) == len(equationa_) == 3):
        raise ValueError("Please enter a valid equation.")
    if equationa[0] == equationa[1] == equationa_[0] == equationa_[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    aa, ba, ca = equationa
    aa_, ba_, ca_ = equationa_

    # Calculate the determinants of the matrices (Cramer's rule)
    determinant = aa * ba_ - aa_ * ba
    determinant_x = ca * ba_ - ca_ * ba
    determinant_y = aa * ca_ - aa_ * ca

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (both right-hand sides are zero)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 573 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Build a ``SwinConfig`` matching a timm Swin checkpoint name.

    ``swin_name`` looks like ``swin_tiny_patch4_window7_224``: the second
    token selects the model size, the last token is the image size and the
    last character of the ``windowN`` token is the window size.

    Restored from the mangled ``UpperCAmelCase(A)`` — the body read
    ``swin_name`` (NameError) and the ``convert_swin_checkpoint`` call site
    uses the name ``get_swin_config``.
    """
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        # Attach the ImageNet-1k label mapping for 1000-class checkpoints.
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    """Translate a timm Swin state-dict key into the HF Transformers layout.

    Restored from the mangled ``UpperCAmelCase(A)`` — the body read ``name``
    (NameError) and each replacement's reassignment had been dropped, so the
    function previously returned its input unchanged.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    # The classification head keeps its top-level position; everything else
    # lives under the "swin." backbone prefix.
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    """Convert a timm Swin state dict in place to the HF Transformers layout.

    The fused ``qkv`` projections are split into separate query/key/value
    tensors sized by the layer's ``all_head_size``; relative-position masks
    are dropped; every other key is routed through :func:`rename_key`.

    Restored from a mangled definition that declared ``A`` twice (a
    SyntaxError) and had lost the left-hand sides of all dict assignments.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            # Attention masks are buffers recomputed by the HF model.
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Convert a pretrained timm Swin checkpoint to HF format and save it.

    Loads the timm model, builds the matching ``SwinForImageClassification``,
    transfers the weights, checks both models agree on a sample COCO image,
    then saves model and image processor to ``pytorch_dump_folder_path``.

    Restored from a mangled definition with duplicate ``A`` parameters (a
    SyntaxError); the ``__main__`` guard calls it as ``convert_swin_checkpoint``.
    """
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    # Sanity check: converted weights must reproduce the timm logits.
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: the original assigned the parser to a mangled name
    # (`lowercase`) while the add_argument/parse_args calls used `parser`
    # and `args`, so the script raised NameError immediately.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 573 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case_(SchedulerCommonTest):
    """Unit tests for ``EulerDiscreteScheduler``.

    NOTE(review): the base class was mangled to an undefined
    ``lowerCamelCase_``; ``SchedulerCommonTest`` (imported above) restored.
    Attribute/method names referenced by the test bodies
    (``scheduler_classes``, ``num_inference_steps``, ``get_scheduler_config``)
    and the ``torch_device``/``generator`` arguments (mangled to the
    undefined ``__snake_case``) are restored likewise.
    """

    # Scheduler class(es) exercised by the common-test machinery.
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config dict, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        # Run a full deterministic denoising loop and pin the output stats.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        # Same loop, but timesteps are placed on the accelerator device.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        # Karras sigma schedule changes the trajectory, hence new expected stats.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 657 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_(BaseImageProcessor):
    r"""GLPN-style image processor.

    Optionally resizes images down to the nearest multiple of
    ``size_divisor`` and rescales pixel values by 1/255.

    NOTE(review): the base class was mangled to an undefined
    ``lowerCamelCase_``; ``BaseImageProcessor`` (imported above) restored.
    The methods must be named ``resize``/``rescale`` because ``preprocess``
    calls them via ``self`` with keyword arguments.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        """Store default preprocessing flags; extra kwargs go to the base class."""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` so both sides are multiples of ``size_divisor``."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` here is the module-level function imported from
        # image_transforms, not this method.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255 to map into [0, 1])."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch; ``None`` args fall back to the
        instance defaults set in ``__init__``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 657 | 1 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the self-avoiding paths from (row, col) to the bottom-right cell.

    Moves are up/down/left/right; cells with value 1 are walls.  ``visit``
    tracks the cells on the current path and is restored on backtrack, so the
    caller can pass an empty set.

    Renamed from the mangled ``_SCREAMING_SNAKE_CASE`` (with duplicate
    ``__snake_case`` parameters) so the recursive calls below resolve.

    >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
    2
    >>> depth_first_search([[0, 1], [1, 0]], 0, 0, set())
    0
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    # Backtrack so sibling branches may reuse this cell.
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    """With the ``mockfs`` fixture active, both the mock and bz2 protocols
    must be present in fsspec's registry.

    Renamed from the mangled ``A__`` (several identically named test
    functions shadowed each other); the parameter is the pytest fixture.
    """
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    """Without the ``mockfs`` fixture, the mock protocol must not be
    registered while bz2 remains available."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    """``extract_path_from_uri`` strips the scheme from remote URIs and
    leaves local paths untouched."""
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    """A mock remote filesystem is detected as remote; the local ``file``
    filesystem is not."""
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(
    compression_fs_class, gz_file, xz_file, zstd_file, bza_file, lza_file, text_file
):
    """Each compression filesystem exposes the decompressed member file and
    its content matches the original text file.

    NOTE(review): the parametrize argument was mangled to the undefined
    ``A``; ``COMPRESSION_FILESYSTEMS`` (imported above) restored.  Fixture
    names ``bza_file``/``lza_file``/``require_lza`` follow this module's
    existing (mangled) spellings — confirm against conftest.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Optional compression backend not installed: skip with its reason.
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    """Chained fsspec URLs (``zip://member::archive``) resolve member files."""
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    """``HfFileSystem`` lists, stats and reads files from a Hub dataset repo.

    NOTE(review): fixture names reconstructed from the body's usage
    (``hf_api.dataset_info(repo, token=...)`` and the local ``text_file``
    comparison) — confirm against conftest.
    """
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    """Re-registering an already-registered protocol warns on module reload."""
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Helper that builds configs/inputs and runs shape checks for the TF
    ResNet tests.

    Renamed from the mangled ``UpperCamelCase``: ``setUp`` in the test class
    below instantiates ``TFResNetModelTester(self)``.  The ``self.x = ...``
    attribute assignments had been mangled away and are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`; referenced by the hidden-state test.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels when enabled) plus a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF ResNet.

    NOTE(review): the mixin bases were mangled to the undefined
    ``__snake_case``; ``TFModelTesterMixin`` and ``PipelineTesterMixin``
    (imported above) restored.  Attribute names required by the mixins
    (``all_model_classes``, ``pipeline_model_mapping``, the ``test_*``
    switches) are restored likewise.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    # ResNet has no prunable heads, token embeddings, head masks or attentions.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Nothing beyond what ConfigTester already covers.
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats test image used by the integration test.

    Renamed from the mangled ``UpperCamelCase`` — the integration test below
    calls ``prepare_img()``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the first pretrained TF ResNet checkpoint.

    Renamed from the mangled ``UpperCamelCase`` to avoid shadowing the other
    test classes in this module; the property must be named
    ``default_image_processor`` because the test body reads it via ``self``.
    """

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 348 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
# Module-level logger used by the doc-example tests below; the original
# mangled assignment (`_lowerCAmelCase = ...`) left the name `logger`
# (referenced in analyze_directory) undefined.
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    """Run doctest over transformers source / documentation files.

    Renamed from the mangled ``UpperCamelCase`` to avoid shadowing the other
    test classes in this module; the helper must be named
    ``analyze_directory`` because every test method calls it via ``self``.
    Test method names are reconstructed — confirm against upstream.
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest every file in ``directory`` matching the filters.

        :param directory: directory containing the files to analyze
        :param identifier: only keep files whose name contains this substring
        :param ignore_files: file names to skip (``__init__.py`` always is)
        :param n_identifier: substring(s) that must NOT appear in kept names
        :param only_modules: if True, run DocTestSuite on the transformers
            attribute named after the file; otherwise run doctest.testfile
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                # The original mangled this to str("..") / ... which is not a
                # valid path expression; Path("..") restored.
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        directory = Path("src/transformers")
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_doc_files(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF MobileBERT.

    NOTE(review): the mixin bases were mangled to the undefined
    ``lowercase``; ``TFModelTesterMixin`` and ``PipelineTesterMixin``
    (imported above) restored, along with the attribute names the mixins
    require.  The rest of this class (setUp, model tester, test methods)
    continues past this block.
    """

    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the mixin's input prep: pretraining models also need a
        ``next_sentence_label`` when labels are requested."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # `tf.intaa` in the mangled source; tf.int32 restored.
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
class lowercase__ ( lowercase ):
def __init__( self : Union[str, Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[Any]=13 ,lowerCamelCase__ : Tuple=7 ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Dict=99 ,lowerCamelCase__ : Union[str, Any]=32 ,lowerCamelCase__ : List[str]=32 ,lowerCamelCase__ : Optional[Any]=2 ,lowerCamelCase__ : Optional[Any]=4 ,lowerCamelCase__ : List[str]=37 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[Any]=512 ,lowerCamelCase__ : str=16 ,lowerCamelCase__ : Any=2 ,lowerCamelCase__ : int=0.0_2 ,lowerCamelCase__ : Optional[int]=3 ,lowerCamelCase__ : str=4 ,lowerCamelCase__ : int=None ,):
'''simple docstring'''
_UpperCamelCase : List[str] = parent
_UpperCamelCase : Any = batch_size
_UpperCamelCase : List[Any] = seq_length
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Union[str, Any] = use_input_mask
_UpperCamelCase : Optional[int] = use_token_type_ids
_UpperCamelCase : List[str] = use_labels
_UpperCamelCase : str = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : Optional[Any] = hidden_act
_UpperCamelCase : List[Any] = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Optional[Any] = max_position_embeddings
_UpperCamelCase : Tuple = type_vocab_size
_UpperCamelCase : List[Any] = type_sequence_label_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[Any] = num_labels
_UpperCamelCase : Optional[Any] = num_choices
_UpperCamelCase : Dict = scope
_UpperCamelCase : List[str] = embedding_size
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCamelCase : Union[str, Any] = None
if self.use_input_mask:
_UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : List[str] = None
if self.use_token_type_ids:
_UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = None
if self.use_labels:
_UpperCamelCase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_UpperCamelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_choices )
_UpperCamelCase : List[str] = MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,embedding_size=self.embedding_size ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Tuple = TFMobileBertModel(config=lowerCamelCase__ )
_UpperCamelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCamelCase : Tuple = model(lowerCamelCase__ )
_UpperCamelCase : int = [input_ids, input_mask]
_UpperCamelCase : Optional[int] = model(lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
_UpperCamelCase : Any = TFMobileBertForMaskedLM(config=lowerCamelCase__ )
_UpperCamelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCamelCase : Union[str, Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Any ,lowerCamelCase__ : str ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=lowerCamelCase__ )
_UpperCamelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCamelCase : Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = TFMobileBertForPreTraining(config=lowerCamelCase__ )
_UpperCamelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCamelCase : str = model(lowerCamelCase__ )
self.parent.assertEqual(
result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.num_labels
_UpperCamelCase : Optional[int] = TFMobileBertForSequenceClassification(config=lowerCamelCase__ )
_UpperCamelCase : Dict = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCamelCase : Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.num_choices
_UpperCamelCase : str = TFMobileBertForMultipleChoice(config=lowerCamelCase__ )
_UpperCamelCase : Optional[int] = tf.tile(tf.expand_dims(lowerCamelCase__ ,1 ) ,(1, self.num_choices, 1) )
_UpperCamelCase : List[str] = tf.tile(tf.expand_dims(lowerCamelCase__ ,1 ) ,(1, self.num_choices, 1) )
_UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase__ ,1 ) ,(1, self.num_choices, 1) )
_UpperCamelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_UpperCamelCase : Dict = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.num_labels
_UpperCamelCase : List[Any] = TFMobileBertForTokenClassification(config=lowerCamelCase__ )
_UpperCamelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCamelCase : Union[str, Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : int ):
'''simple docstring'''
_UpperCamelCase : List[Any] = TFMobileBertForQuestionAnswering(config=lowerCamelCase__ )
_UpperCamelCase : Dict = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCamelCase : Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) : Tuple = config_and_inputs
_UpperCamelCase : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCamelCase : Dict = ConfigTester(self ,config_class=lowerCamelCase__ ,hidden_size=37 )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase__ )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase__ )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase__ )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase__ )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase__ )
@slow
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCamelCase : Tuple = TFMobileBertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published `google/mobilebert-uncased` checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]  # (batch, seq_len, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        # reference logits recorded from the original implementation
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 195 |
'''simple docstring'''
import os
from pathlib import Path
def A__():
    """Compile and load the custom multi-scale deformable-attention CUDA/C++ kernels.

    Fixes the undefined name the original used for the kernel root: the kernel
    directory is resolved relative to this file (`__file__`), the source list is
    passed positionally, and compilation is done with CUDA enabled.

    Returns:
        The loaded `MultiScaleDeformableAttention` extension module.
    """
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 195 | 1 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if *number* is prime.

    Uses the 6k±1 optimization: after ruling out multiples of 2 and 3, every
    prime is of the form 6k-1 or 6k+1, so trial division only tests those.
    Name restored — the sibling generator calls `is_prime`.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    """Yield the primes 2, 3, 5, 7, ... indefinitely.

    Name restored — `solution` and the module call `prime_generator`.
    Self-contained incremental trial division: a candidate is prime when no
    previously found prime up to its square root divides it; only odd
    candidates are tested after 2.
    """
    yield 2
    found = [2]
    candidate = 3
    while True:
        if all(candidate % p for p in takewhile(lambda p: p * p <= candidate, found)):
            found.append(candidate)
            yield candidate
        candidate += 2
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below *n* (Project Euler 10).

    Name restored — the __main__ guard calls `solution()`; the original lambda
    also referenced an undefined variable. Reimplemented with a sieve of
    Eratosthenes, which is self-contained and far faster than summing a
    trial-division generator for n = 2,000,000.
    """
    if n <= 2:
        return 0
    sieve = bytearray([1]) * n  # sieve[i] == 1  <=>  i is (still) considered prime
    sieve[0:2] = b"\x00\x00"
    for i in range(2, int(n**0.5) + 1):
        if sieve[i]:
            # knock out multiples starting at i*i; smaller ones were already hit
            sieve[i * i :: i] = bytearray(len(range(i * i, n, i)))
    return sum(i for i in range(n) if sieve[i])


if __name__ == "__main__":
    print(f"{solution() = }")
| 430 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature holding one translation string per fixed language.

    Restored field/method names (the original had duplicate `__a` fields and an
    undefined `A_` in `field(...)`) per the Hugging Face `datasets` feature API.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage: one string field per language, in sorted order
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        """Flatten the feature into one `Value("string")` per language."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """Feature for translations where each example may cover a variable subset of languages.

    Restored field/method names (`__post_init__`, `encode_example`, `flatten`);
    the original had three identically named methods shadowing each other,
    duplicate `__a` fields and an undefined `A_` in `field(...)`.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # normalize to a sorted, duplicate-free language list
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Encode ``{lang: text-or-list-of-texts}`` into parallel language/translation tuples.

        Raises:
            ValueError: if *translation_dict* contains a language outside `self.languages`.
        """
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten the feature into language/translation string sequences."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 430 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    """Slow integration test for the Flax XLM-RoBERTa base checkpoint.

    Local names restored — the original assigned every value to `_A` and then
    referenced `model`, `tokenizer` and `output`, which were undefined.
    """

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 107 |
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file (``u v weight`` per line) into an adjacency dict.

    Name restored — `main` calls `generate_neighbours`; the original definition
    also duplicated its parameter name (a SyntaxError).

    Returns:
        dict mapping each node to a list of ``[neighbour, weight]`` pairs,
        with weights kept as strings exactly as read from the file.
    """
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            parts = line.split()
            if not parts:
                continue  # tolerate blank/trailing lines instead of crashing
            u, v, weight = parts[0], parts[1], parts[2]
            # the graph is undirected: record the edge in both directions
            dict_of_neighbours.setdefault(u, []).append([v, weight])
            dict_of_neighbours.setdefault(v, []).append([u, weight])

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting at the file's first character.

    Names restored — `main` calls `generate_first_solution`; the original
    definition duplicated its parameter name (a SyntaxError) and its body
    referenced the unscrambled names.

    Returns:
        ``(tour, total_distance)`` where *tour* starts and ends at the start node.
        The 10000 sentinel mirrors the original algorithm: it is added on the
        final (neighbourless) step and subtracted when closing the tour.
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0

    while visiting not in first_solution:
        minim = 10000  # sentinel: assumed larger than any single edge weight
        for neighbour, weight in dict_of_neighbours[visiting]:
            if int(weight) < int(minim) and neighbour not in first_solution:
                minim = weight
                best_node = neighbour  # NOTE(review): unbound if the start node has no usable neighbour

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    # locate the closing edge back to the start node
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1]) - 10000
    )
    return first_solution, distance_of_first_solution
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> Dict:
__lowerCamelCase = []
for n in solution[1:-1]:
__lowerCamelCase = solution.index(__lowerCAmelCase )
for kn in solution[1:-1]:
__lowerCamelCase = solution.index(__lowerCAmelCase )
if n == kn:
continue
__lowerCamelCase = copy.deepcopy(__lowerCAmelCase )
__lowerCamelCase = kn
__lowerCamelCase = n
__lowerCamelCase = 0
for k in _tmp[:-1]:
__lowerCamelCase = _tmp[_tmp.index(__lowerCAmelCase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
__lowerCamelCase = distance + int(i[1] )
_tmp.append(__lowerCAmelCase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
__lowerCamelCase = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __lowerCAmelCase : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for *iters* iterations with a tabu list capped at *size*.

    Names restored — `main` calls `tabu_search`; the original definition
    duplicated its parameter names (a SyntaxError).

    Returns:
        ``(best_solution_ever, best_cost)`` — the best tour found and its distance.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1  # the appended tour distance

        found = False
        while not found:
            # identify the first position where the candidate differs from the
            # current solution — that pair defines the exchange move
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if (
                [first_exchange_node, second_exchange_node] not in tabu_list
                and [second_exchange_node, first_exchange_node] not in tabu_list
            ):
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # drop the appended distance
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # NOTE(review): raises IndexError if every neighbour is tabu —
                # preserved from the original algorithm
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

            if len(tabu_list) >= size:
                tabu_list.pop(0)  # evict the oldest tabu move

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    """Command-line entry point: load the graph, build a greedy tour, then run tabu search.

    Name and local names restored — the original assigned every value to the
    same mangled identifier while the body/print referenced the real names.
    """
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(args.File, dict_of_neighbours)

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    # Restored: the parser was assigned to a mangled name while the
    # add_argument calls referenced `parser`, which was undefined.
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 298 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    """Configuration for the Donut Swin-Transformer encoder.

    Parameter names restored from the body's attribute assignments — the
    original `__init__` duplicated the same mangled parameter name for every
    argument (a SyntaxError).
    """

    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 417 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
# Names restored: line 4 of this block and the __main__ guard reference
# VECTOR_1..3 / INITIAL_VECTORS, which the mangled assignments left undefined.
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch-snowflake subdivision *steps* times to *initial_vectors*.

    Name restored — the __main__ guard calls `iterate`; the original definition
    duplicated its parameter name (a SyntaxError).
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Perform one Koch subdivision: each segment becomes four, with a 60° bump.

    Name restored — `iterate` calls `iteration_step`; the mangled name collided
    with the other functions in this module.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        # first third, rotated middle third (the snowflake spike), second third
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2-D *vector* counter-clockwise by *angle_in_degrees*.

    Name restored — `iteration_step` calls `rotate`; the original definition
    duplicated its parameter name (a SyntaxError).
    """
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """Display the snowflake outline with matplotlib.

    Name restored — the __main__ guard calls `plot`; the mangled name collided
    with the other functions in this module.
    """
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Restored: the result was bound to a mangled name and the call sites
    # referenced the undefined INITIAL_VECTORS/iterate/plot identifiers.
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 417 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__lowerCamelCase : List[str] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class a__ ( A__ ):
    """Agent tool that translates text between languages with the NLLB-200 model.

    NOTE(review): in the obfuscated source every class attribute was bound to the
    single name ``A`` (so only the last survived and ``self.lang_to_code`` /
    ``self.pre_processor`` lookups failed) and all three methods shared one name
    with duplicate ``_A`` parameters (a SyntaxError). Attributes and methods are
    restored to the identifiers the method bodies and the PipelineTool framework
    actually read — confirm ``encode``/``forward``/``decode`` against the base class.
    """

    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']

    def encode(self, text, src_lang, tgt_lang):
        """Validate the plain-English language names and tokenize `text` for translation.

        Raises ValueError when either language is not in `lang_to_code`.
        """
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='pt', src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        """Run sequence generation on the tokenized inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Convert the first generated sequence back to a plain string."""
        # skip_special_tokens=True drops the language/BOS/EOS control tokens.
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a__ ( A__ ):
    """Processor wrapping a ViT image processor and a CLIP tokenizer into one object.

    NOTE(review): the obfuscated source bound every class attribute to the single
    name ``A``, reused one local for every intermediate result (leaving
    ``encoding`` / ``prompt_features`` / ``image_features`` undefined) and gave
    ``__call__`` duplicate ``_A`` parameter names (a SyntaxError). Names are
    restored to what ``ProcessorMixin`` and the method bodies require.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the deprecated kwarg as a fallback for the image processor.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        """Prepare text and/or image inputs for the model.

        Exactly one of `text` / `visual_prompt` may be supplied; `images` may be
        combined with either. Returns a dict / BatchEncoding of model inputs.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            # images only: wrap the image features in a BatchEncoding.
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    """Resize images so the shortest edge falls in `short_edge_length`, capping the longest edge.

    NOTE(review): this class was defined under an obfuscated name while the
    preprocessor below instantiates it literally as ``ResizeShortestEdge``; the
    ``self.`` targets of the ``__init__`` assignments had also been lost, so the
    attributes read in ``__call__`` were never set. Both are restored here.
    """

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        # short_edge_length: inclusive [min, max] range to sample the target size from.
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            # Shrink further if the longest edge would exceed max_size.
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(neww + 0.5) if False else int(newh + 0.5)

            if img.dtype == np.uint8:
                # uint8 arrays go through PIL to match the reference pipeline.
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class _lowerCamelCase:
    """FRCNN-style preprocessor: move to device, resize, normalize and pad a batch of images.

    NOTE(review): the ``self.`` targets of all ``__init__`` assignments were lost
    in obfuscation and the padding method was misnamed while ``__call__`` invokes
    ``self.pad``; both are restored. The class keeps its (obfuscated) public
    name so any external references stay valid.
    """

    def __init__(self, cfg):
        # Resize so the shortest edge equals MIN_SIZE_TEST, capped at MAX_SIZE_TEST.
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        """Zero-pad every image to the batch-wide max H/W; also return the padded sizes."""
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # per-image (y, x) factors mapping padded coordinates back to raw size
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]:
"""simple docstring"""
assert torch.isfinite(lowercase_ ).all(), "Box tensor contains infinite or NaN!"
__UpperCamelCase , __UpperCamelCase = box_size
tensor[:, 0].clamp_(min=0 , max=lowercase_ )
tensor[:, 1].clamp_(min=0 , max=lowercase_ )
tensor[:, 2].clamp_(min=0 , max=lowercase_ )
tensor[:, 3].clamp_(min=0 , max=lowercase_ )
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module-level assignments were bound to one obfuscated name,
# so the logger was immediately shadowed by the archive map; they now have
# distinct, conventional names.
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
    """Configuration class for a UniSpeech model; stores hyper-parameters only.

    NOTE(review): the obfuscated ``__init__`` declared every parameter under the
    single name ``snake_case`` (a SyntaxError) and dropped the ``self.`` targets
    of every assignment. Parameters are reconstructed from the default-value
    sequence and the attribute names the validation block and property read.
    """

    # PretrainedConfig dispatch key — confirm against the model registry.
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv descriptors must be the same length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # Total stride of the feature extractor = product of all conv strides.
        return functools.reduce(operator.mul, self.conv_stride, 1)
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
# NOTE(review): all four aliases were originally assigned to one obfuscated
# name, so only the last survived; each now has a distinct, descriptive name.
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# NOTE(review): the original rebound a single obfuscated name for every
# optional-dependency branch and never defined the `_import_structure` that the
# final `_LazyModule` call consumes; each branch now extends the mapping, and
# the lazy module is installed into `sys.modules` as the lazy-init pattern
# requires.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import os
import re
_UpperCamelCase : List[str] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_UpperCamelCase : Tuple = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_UpperCamelCase : List[str] = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname: str, overwrite: bool = False):
    """Sort the entries of every auto-model mapping declared in `fname`.

    NOTE(review): the obfuscated signature declared both parameters as ``A_``
    (a SyntaxError) and the function name while the caller below invokes
    ``sort_auto_mapping`` literally; both are restored.

    When `overwrite` is True the sorted content is written back; otherwise
    returns True if the file would change (i.e. it is unsorted).
    """
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # Entries of the mapping are indented 8 spaces past the declaration.
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    """Run `sort_auto_mapping` over every Python file in the auto-model module.

    When `overwrite` is False, raises ValueError listing the files whose
    mappings are out of order instead of rewriting them.
    """
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
_UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_UpperCamelCase : Union[str, Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """Build a DownloadCommand from a parsed CLI namespace (used via set_defaults)."""
    # NOTE(review): the obfuscated factory referenced `args` and `DownloadCommand`
    # literally while its parameter and the class below were named differently;
    # names are restored so those references resolve.
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    """`transformers-cli download`: fetch a model and its tokenizer into the local cache."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the `download` sub-parser and its options to the CLI parser."""
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models')
        download_parser.add_argument(
            '--force', action='store_true', help='Force the model to be download even if already in cache-dir')
        download_parser.add_argument(
            '--trust-remote-code', action='store_true', help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine', )
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        """Download both model weights and tokenizer (imports deferred to avoid cycles)."""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()

# Mapping of model-type keys to (config class, TF class(es), PT class(es),
# pretrained archive map(s)) used by the conversion functions below.
# NOTE(review): the dict was bound to an obfuscated name while both conversion
# functions index `MODEL_CLASSES` literally; only the name is changed here —
# entry contents are preserved byte-for-byte.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert one PyTorch checkpoint to a TF2 `.h5` weights file.

    NOTE(review): the obfuscated source named this function and its sibling
    both ``A__`` and bound every local to one reused name; identifiers are
    restored from the literal references (``MODEL_CLASSES``, ``aws_config_map``,
    ``cached_file``) in the body and call sites.

    Raises ValueError for an unknown `model_type`. When `compare_with_pt_model`
    is True, asserts that TF and PT outputs agree within 2e-2.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models)
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location='cpu')
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict)

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2E-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format='h5')
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert every known checkpoint (or just `args_model_type`) to TF2 weights.

    Iterates model types and their shortcut names, resolving config/weight
    files from the archive maps before delegating to
    `convert_pt_checkpoint_to_tf` (referenced literally in the original body).
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            # Fine-tuned checkpoints carry a task suffix and use the shortcut
            # name itself as the model type.
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}")
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
UpperCAmelCase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
) | 32 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazily-exposed public API of the Pix2Struct sub-package, keyed by submodule name.
# Optional entries are *added* to this dict (never rebound) so earlier entries survive.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image processor needs vision deps (PIL).
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code needs PyTorch.
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; module names must match the
    # _import_structure keys above.
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 514 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# Maps the CLI model-type key to (config class, LM head class, tokenizer class);
# read as MODEL_CLASSES[...] when wiring up the student and the teacher.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args) -> None:
    """
    Validate the parsed CLI arguments before starting distillation.

    Checks that the loss weights, the MLM/CLM mode, the student/teacher type
    combination and the referenced files are mutually consistent.

    Args:
        args: the ``argparse.Namespace`` produced by ``main``'s parser.

    Raises:
        AssertionError: if any combination of options is inconsistent.
    """
    # MLM mode requires a positive MLM weight; CLM mode requires it to be 0.
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    # Exactly one of the two LM losses must be active.
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    # Only same-family pairs (or the DistilBERT-from-BERT special case) are supported.
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    # All loss weights are non-negative and at least one must be active.
    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args) -> None:
    """
    Freeze the student's positional-embedding weights.

    Args:
        student: the student model (RoBERTa- or GPT-2-style architecture).
        args: parsed CLI namespace; only ``args.student_type`` is read.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        # GPT-2 stores positional embeddings as `wpe` on the transformer stack.
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args) -> None:
    """
    Freeze the student's token-type (segment) embedding weights.

    Only meaningful for RoBERTa-style students (GPT-2 has no token-type
    embeddings), hence the single branch.

    Args:
        student: the student model.
        args: parsed CLI namespace; only ``args.student_type`` is read.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main() -> None:
    """
    Entry point of the distillation script.

    Parses CLI arguments, loads the teacher and student models and the binarized
    dataset, then runs knowledge distillation via ``Distiller``.
    """
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether"
                    " to overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    # Look up the architecture triplets for student and teacher.
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        # Smooth the empirical token frequencies to up-weight rare tokens.
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
# Script entry point: only run training when executed directly, not on import.
if __name__ == "__main__":
    main()
| 715 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
# Module-level logger; the config class below calls `logger.info(...)`.
logger = logging.get_logger(__name__)
# NOTE(review): the class name looks machine-mangled; it presumably should be
# `UperNetConfig` — confirm against the package's public API before renaming.
class lowercase__(PretrainedConfig):
    """
    Configuration for a UperNet semantic-segmentation model.

    Stores the backbone configuration (defaulting to a ResNet backbone when none
    is given) plus the decode-head and auxiliary-head hyperparameters.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=None,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        """
        Args:
            backbone_config: a backbone config object, a dict serialization of
                one, or None (falls back to the default ResNet backbone).
            hidden_size: number of hidden units in the convolutional layers.
            initializer_range: std-dev for weight initialization.
            pool_scales: pooling scales of the Pyramid Pooling Module
                (defaults to [1, 2, 3, 6]).
            use_auxiliary_head: whether to attach an auxiliary head.
            auxiliary_loss_weight: weight of the auxiliary head's loss.
            auxiliary_in_channels / auxiliary_channels / auxiliary_num_convs /
                auxiliary_concat_input: auxiliary-head architecture knobs.
            loss_ignore_index: label index ignored by the loss.
            **kwargs: forwarded to ``PretrainedConfig``.
        """
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Re-hydrate a dict serialization into the proper config class.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        # None-sentinel avoids sharing a mutable default list across instances.
        self.pool_scales = pool_scales if pool_scales is not None else [1, 2, 3, 6]
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, nesting the backbone config as a dict too."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 369 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazily-exposed public API of the DeiT sub-package, keyed by submodule name.
# Optional entries are *added* to this dict (never rebound) so earlier entries survive.
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Feature extractor / image processor need vision deps (PIL).
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 103 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """Holds the DetaImageProcessor kwargs used by the tests and computes the
    output sizes the processor is expected to produce."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # None-sentinel so the default size dict is not shared across instances.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs to instantiate a DetaImageProcessor with."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Mirror DetaImageProcessor's resizing: scale the shorter side to
        ``size["shortest_edge"]`` keeping aspect ratio. For a batch, return the
        per-axis maxima (images are padded up to the largest in the batch).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # Tensors/arrays are channels-first: (C, H, W).
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for DetaImageProcessor: attribute presence, dict round-trip,
    output tensor shapes for PIL/numpy/torch inputs, and COCO annotations."""

    # Processor under test; None when vision deps are unavailable.
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 93 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCAmelCase_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
__magic_name__ : Optional[Any] =precision
__magic_name__ : Optional[int] =ceil(precision / 14 )
__magic_name__ : List[str] =426880 * Decimal(10005 ).sqrt()
__magic_name__ : str =1
__magic_name__ : List[str] =13591409
__magic_name__ : Tuple =Decimal(lowerCamelCase )
for k in range(1 , lowerCamelCase ):
__magic_name__ : List[str] =factorial(6 * k ) // (factorial(3 * k ) * factorial(lowerCamelCase ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = 50
print(F"""The first {n} digits of pi is: {pi(n)}""")
| 367 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( TokenizerTesterMixin , unittest.TestCase ):
    """Unit tests for `BlenderbotSmallTokenizer`.

    NOTE(review): the original obfuscated code inherited from an undefined name
    and reused one identifier for every attribute/method (so only the last
    definition would survive); the names that `TokenizerTesterMixin` and
    `unittest` actually dispatch on are restored here.
    """

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny vocab/merges fixture into `tmpdirname` for the mixin to load."""
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Instantiate a tokenizer from the fixture files, forcing the special tokens."""
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Round-trip sample used by the common tokenizer tests."""
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        """BPE tokenization and id conversion against the fixture vocab."""
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        """Encode/decode round trip against the hosted 90M checkpoint (needs network)."""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        """A trailing ' .' must encode to the same id as a lone '.' (needs network)."""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 367 | 1 |
import numpy as np
import datasets
# NOTE(review): the three metric doc constants were all obfuscated to one name
# (`__a`), each shadowing the previous, while the metric class below references
# `_DESCRIPTION`, `_CITATION` and `_KWARGS_DESCRIPTION`.  The names the class
# actually uses are restored; the string contents are unchanged.
_DESCRIPTION = '''
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''

_CITATION = '''\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
'''

_KWARGS_DESCRIPTION = '''
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    """Mahalanobis-distance metric for `datasets`.

    NOTE(review): the original obfuscated `_compute` declared two parameters
    with the same name (a SyntaxError) and never bound the locals it read;
    the `_info`/`_compute` hook names required by `datasets.Metric` and the
    `X`/`reference_distribution` keyword API from the docstring example are
    restored here.
    """

    def _info(self):
        """Describe the metric and its expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """X""": datasets.Sequence(datasets.Value("""float""", id="""sequence"""), id="""X"""),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return the Mahalanobis distance of each row of `X` w.r.t. the reference distribution.

        Raises:
            ValueError: if either input is not 2D, or the reference has < 2 rows.
        """
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("""Expected `X` to be a 2D vector""")
        if len(reference_distribution.shape) != 2:
            raise ValueError("""Expected `reference_distribution` to be a 2D vector""")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Fall back to the pseudo-inverse for singular covariance matrices.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
return {"mahalanobis": mahal_dist} | 108 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output of the UnCLIP scheduler's `step` method.

    NOTE(review): the class was obfuscated to inherit from the undefined name
    `a`; `BaseOutput` (imported above) is restored, and the class is renamed
    to `UnCLIPSchedulerOutput` because the scheduler's `step` method in this
    file returns `UnCLIPSchedulerOutput(...)`.
    """

    # Denoised sample x_{t-1}, to be fed to the next denoising step.
    prev_sample: torch.FloatTensor
    # The model's current estimate of the fully denoised sample x_0, if computed.
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """Discretize an alpha_bar(t) schedule into per-step betas.

    beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clamped at `max_beta`.

    NOTE(review): the obfuscated original named all three parameters
    `lowercase` (a SyntaxError) and used the invalid dtype `torch.floataa`;
    the name is restored to `betas_for_alpha_bar`, which is what the
    scheduler's `__init__` in this file calls.

    Args:
        num_diffusion_timesteps: number of betas to produce.
        max_beta: upper clamp, keeps the last steps numerically stable.
        alpha_transform_type: "cosine" (squaredcos_cap_v2) or "exp".

    Returns:
        1-D float32 tensor of length `num_diffusion_timesteps`.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class _lowerCamelCase(SchedulerMixin, ConfigMixin):
    """Modified DDPM scheduler used by the unCLIP (karlo) pipelines.

    NOTE(review): the obfuscated original declared duplicate bases (`a , a`,
    with `a` undefined), gave every `__init__` parameter one shared name (a
    SyntaxError), and gave all five methods the same name so only the last
    would survive.  The canonical diffusers scheduler interface
    (`scale_model_input` / `set_timesteps` / `step` / `add_noise`) is
    restored so SchedulerMixin-based pipelines can drive it.
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: float = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ) -> None:
        # Only the squared-cosine schedule is supported by unCLIP.
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # Convenience constant for the t == 0 boundary (alpha_prod_prev == 1).
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep=None) -> torch.FloatTensor:
        """No-op scaling hook: unCLIP does not rescale the model input."""
        return sample

    def set_timesteps(self, num_inference_steps, device=None) -> None:
        """Pick `num_inference_steps` timesteps equally spaced over
        [0, num_train_timesteps - 1], in descending order."""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        # Fixed: was np.intaa (mangled np.int64).
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        """Posterior variance for step t (formulas (6)/(7) of the DDPM paper).

        For "fixed_small_log" the returned value is the standard deviation
        exp(0.5 * log var), not the variance itself.
        """
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            # Non-adjacent steps: recompute the effective beta from the cumprods.
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output,
        timestep,
        sample,
        prev_timestep=None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        """One reverse-diffusion step: predict x_{t-1} from the model output at t.

        Returns an `UnCLIPSchedulerOutput` (or a 1-tuple when
        `return_dict=False`).
        """
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            # The model predicts mean and (log-)variance stacked on the channel axis.
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                " for the UnCLIPScheduler." )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device )
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep, )

            if self.variance_type == "fixed_small_log":
                # _get_variance already returned the standard deviation here.
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    " for the UnCLIPScheduler." )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        """Forward-diffuse `original_samples` to the given `timesteps`:
        x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise."""
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        # Broadcast over trailing sample dimensions (channels, height, width, ...).
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 243 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _lowerCAmelCase ( nn.Module ):
    r"""Cross-attention 2D down block: `num_layers` x (ResNet + Transformer),
    optionally followed by a downsampling convolution.

    NOTE(review): the obfuscation collapsed every hyperparameter field to one
    mangled name while the method bodies read `self.in_channels` etc., and the
    setup method lost the `setup` name flax.linen dispatches on; both are
    restored here (`jnp.floataa` was an invalid mangling of `jnp.float32`).
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # The first resnet maps in_channels -> out_channels; the rest stay at out_channels.
            res_in_channels = self.in_channels if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=res_in_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
            attentions.append(
                FlaxTransformeraDModel(
                    in_channels=self.out_channels,
                    n_heads=self.num_attention_heads,
                    d_head=self.out_channels // self.num_attention_heads,
                    depth=1,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=self.only_cross_attention,
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        # Collect the hidden state after each sub-block for the UNet skip connections.
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
    r"""Plain 2D down block: `num_layers` ResNet blocks, optionally followed
    by a downsampling convolution (no attention).

    NOTE(review): hyperparameter field names and the `setup` method name were
    restored from the attribute accesses in the method bodies.
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        resnets = []
        for i in range(self.num_layers):
            # The first resnet maps in_channels -> out_channels; the rest stay at out_channels.
            res_in_channels = self.in_channels if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=res_in_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        # Collect the hidden state after each sub-block for the UNet skip connections.
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
    r"""Cross-attention 2D up block: `num_layers` x (ResNet + Transformer) over
    concatenated skip connections, optionally followed by upsampling.

    NOTE(review): hyperparameter field names and the `setup` method name were
    restored from the attribute accesses in the method bodies.
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # Channel bookkeeping for the concatenated skip connection.
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
            attentions.append(
                FlaxTransformeraDModel(
                    in_channels=self.out_channels,
                    n_heads=self.num_attention_heads,
                    d_head=self.out_channels // self.num_attention_heads,
                    depth=1,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=self.only_cross_attention,
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)

        return hidden_states
class _lowerCAmelCase ( nn.Module ):
    r"""Plain 2D up block: `num_layers` ResNet blocks over concatenated skip
    connections, optionally followed by upsampling (no attention).

    NOTE(review): hyperparameter field names and the `setup` method name were
    restored from the attribute accesses in the method bodies.
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        resnets = []
        for i in range(self.num_layers):
            # Channel bookkeeping for the concatenated skip connection.
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)

        return hidden_states
class _lowerCAmelCase ( nn.Module ):
    r"""2D mid block: a leading ResNet, then `num_layers` x (Transformer +
    ResNet) pairs, no resolution change.

    NOTE(review): hyperparameter field names and the `setup` method name were
    restored from the attribute accesses in the method bodies; all five
    nn.Module classes in this file share the obfuscated name `_lowerCAmelCase`,
    so only the last binding survives at module scope — renaming them is left
    to a follow-up because the names are externally visible.
    """

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        # One more resnet than attention blocks: resnet, (attn, resnet) * num_layers.
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attentions.append(
                FlaxTransformeraDModel(
                    in_channels=self.in_channels,
                    n_heads=self.num_attention_heads,
                    d_head=self.in_channels // self.num_attention_heads,
                    depth=1,
                    use_linear_projection=self.use_linear_projection,
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            )
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=self.in_channels,
                    out_channels=self.in_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
| 714 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module constants were obfuscated to the same name
# (`__snake_case`), so the logger was immediately shadowed by the URL map;
# the canonical transformers names are restored.
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
    """facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
    """facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
    """facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
    """facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
    """facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
    """facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
    """facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
    """facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class _lowerCAmelCase(PretrainedConfig):
    """Configuration for X-MOD models.

    NOTE(review): the obfuscated original inherited from the undefined name
    `snake_case_` and declared every `__init__` parameter with one shared name
    (a SyntaxError); the `PretrainedConfig` base and the parameter names
    implied by the body's `self.<name> = ...` assignments are restored.
    """

    # Model identifier used by transformers' auto classes.
    model_type = """xmod"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD adapter-specific options.
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class _lowerCAmelCase(OnnxConfig):
    """ONNX export configuration for X-MOD.

    NOTE(review): the obfuscated base `snake_case_` was undefined and the
    property lost its name; `OnnxConfig` (imported above) and the `inputs`
    property name its machinery reads are restored.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the exported model inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 117 | 0 |
from __future__ import annotations
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
    """Return the minimal path cost from the top-left to the bottom-right of a
    grid, moving only right or down.

    The grid is folded in place into dynamic-programming partial sums (the
    caller's matrix is mutated), and the accumulated cost in the last cell is
    returned.
    """
    grid = _SCREAMING_SNAKE_CASE
    rows, cols = len(grid), len(grid[0])

    # Fold the first row and first column into running prefix sums.
    for col in range(1, cols):
        grid[0][col] += grid[0][col - 1]
    for row in range(1, rows):
        grid[row][0] += grid[row - 1][0]

    # Every other cell takes the cheaper of its two predecessors.
    for row in range(1, rows):
        for col in range(1, cols):
            grid[row][col] += min(grid[row - 1][col], grid[row][col - 1])

    return grid[-1][-1]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 402 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """Map one key of the original EfficientFormer checkpoint to its
    transformers-style name.

    NOTE(review): the obfuscated original declared two parameters with the
    same name (a SyntaxError) and read locals (`old_name`, `trimmed_name`,
    `new_name`) that were never bound; the function is also renamed from the
    thrice-bound `lowercase_` so the converter below can reach it.

    Args:
        old_name: a key from the raw state dict.
        num_meta4D_last_stage: number of Meta4D blocks in the final stage;
            blocks past that index belong to the Meta3D layers.

    Returns:
        The renamed key.
    """
    new_name = old_name

    # Stem: convolution / batch-norm pairs of the patch embedding.
    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    # Backbone blocks: "network.<stage>.<block>..." keys.
    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                # Meta3D sub-modules use transformer-style names.
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    # Remaining Meta4D sub-modules keep conv/batch-norm style names.
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    # Heads sit on the top-level model; everything else gets the backbone prefix.
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of a raw EfficientFormer state dict in place.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and dropped both the renamed-key assignment and the call
    to the key-renaming helper; the function is renamed to
    `convert_torch_checkpoint`, the name the conversion entry point below
    already calls.

    Returns:
        The same dict, with every key renamed via `rename_key`.
    """
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    """Download the standard COCO cats image used to sanity-check converted models.

    NOTE(review): renamed from the thrice-bound `lowercase_` to `prepare_img`,
    the name the conversion entry point below already calls; the original also
    passed the URL as the `stream` argument.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def lowercase_ ( SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : bool ):
    """Convert an original EfficientFormer checkpoint to the Transformers format.

    NOTE(review): all four parameters carry the same mangled name, which is a
    SyntaxError in Python; from the call site and the body they are, in order:
    the source checkpoint path, the model config JSON path, the output dump
    path, and a push-to-hub flag — confirm against the upstream script.
    """
    # raw state dict of the original checkpoint
    snake_case__ : Union[str, Any] =torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''model''']
    snake_case__ : int =EfficientFormerConfig.from_json_file(SCREAMING_SNAKE_CASE )
    snake_case__ : Dict =EfficientFormerForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE )
    # model name, e.g. "efficientformer_l1", recovered from the checkpoint file name
    snake_case__ : Union[str, Any] ='''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
    snake_case__ : List[Any] =config.depths[-1] - config.num_metaad_blocks + 1
    snake_case__ : Dict =convert_torch_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    model.load_state_dict(SCREAMING_SNAKE_CASE )
    model.eval()
    # map resampling names onto PIL constants
    snake_case__ : Optional[int] ={
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    # prepare image
    snake_case__ : Any =prepare_img()
    snake_case__ : str =2_56
    snake_case__ : List[Any] =2_24
    snake_case__ : Any =EfficientFormerImageProcessor(
        size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
    snake_case__ : int =processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
    # original processing pipeline
    snake_case__ : List[str] =Compose(
        [
            Resize(SCREAMING_SNAKE_CASE , interpolation=pillow_resamplings['''bicubic'''] ),
            CenterCrop(SCREAMING_SNAKE_CASE ),
            ToTensor(),
            Normalize(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
        ] )
    snake_case__ : Tuple =image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
    # sanity check: HF processor output must match the original pipeline exactly
    assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    snake_case__ : Optional[Any] =model(SCREAMING_SNAKE_CASE )
    snake_case__ : List[Any] =outputs.logits
    # expected logits shape: (batch, ImageNet classes)
    snake_case__ : Optional[Any] =(1, 10_00)
    # compare the first 10 logits against hard-coded reference values per variant
    if "l1" in model_name:
        snake_case__ : Union[str, Any] =torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        snake_case__ : Optional[Any] =torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        snake_case__ : Dict =torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
    # Save Checkpoints
    Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
    model.save_pretrained(SCREAMING_SNAKE_CASE )
    print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
    processor.save_pretrained(SCREAMING_SNAKE_CASE )
    print(F'''Processor successfuly saved at {pytorch_dump_path}''' )
    if push_to_hub:
        print('''Pushing model to the hub...''' )
        model.push_to_hub(
            repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
        processor.push_to_hub(
            repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
    # Command-line entry point for the EfficientFormer conversion script.
    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--pytorch_model_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to EfficientFormer pytorch checkpoint.''',
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The json file for EfficientFormer model config.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    # paired opt-out flag; with set_defaults below, pushing is ON by default
    parser.add_argument(
        '''--no-push_to_hub''',
        dest='''push_to_hub''',
        action='''store_false''',
        help='''Do not push model and image processor to the hub''',
    )
    parser.set_defaults(push_to_hub=True)
    # NOTE(review): the parser is bound to the mangled name `lowerCamelCase__`
    # above, while `parser` / `args` / `convert_efficientformer_checkpoint`
    # are referenced here — confirm naming against the upstream script.
    lowerCamelCase__ = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class lowerCamelCase ( _A ):
    """Deprecated alias for the Deformable DETR image processor.

    Kept only for backwards compatibility: emits a ``FutureWarning`` on
    instantiation and otherwise behaves exactly like the parent class.
    """

    def __init__( self , *args , **kwargs ):
        # The second positional argument of `warnings.warn` is the warning
        # *category*; the original code passed an arbitrary caller argument
        # there (and used the same mangled name for *args and **kwargs,
        # which is a SyntaxError).
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
'''simple docstring'''
def __A ( a_ : int ) -> bool:
    """Return True if ``a_`` is an automorphic number.

    An automorphic number is one whose square ends in the number itself
    (e.g. 5 -> 25, 76 -> 5776). Negative numbers are never automorphic.

    Raises:
        TypeError: if the input is not an ``int``.
    """
    # Original bug: `isinstance(a_, a_)` (value used as the type) always
    # raised TypeError, and the error message referenced an undefined name.
    if not isinstance(a_ , int ):
        raise TypeError(f'''Input value of [number={a_}] must be an integer''' )
    if a_ < 0:
        return False
    number = a_
    number_square = a_ * a_
    # compare the trailing digits of the number and of its square, one by one
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import sys
def a_ ( arr : list ):
    """Compute the matrix-chain-multiplication DP tables.

    ``arr`` holds the chain dimensions: matrix ``Ai`` is
    ``arr[i-1] x arr[i]``, so there are ``len(arr) - 1`` matrices.
    The original parameter was named ``__lowercase`` while the body
    referenced ``lowercase__`` — a NameError on every call.

    Returns:
        (matrix, sol): ``matrix[a][b]`` is the minimum number of scalar
        multiplications needed for the sub-chain ``Aa..Ab`` (1-indexed) and
        ``sol[a][b]`` the split index ``c`` achieving it.
    """
    n = len(arr )
    # DP cost table and split-point table, both (n x n); row/col 0 unused
    matrix = [[0 for _ in range(n )] for _ in range(n )]
    sol = [[0 for _ in range(n )] for _ in range(n )]

    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + arr[a - 1] * arr[c] * arr[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def a_ ( optimal_solution , i , j ):
    """Print the optimal parenthesization of the chain ``Ai..Aj``.

    The original signature repeated one mangled parameter name three times
    (a SyntaxError); from the call sites the parameters are, in order, the
    split table returned by the DP, and the 1-indexed chain bounds.
    """
    if i == j:
        print('A' + str(i ) , end=' ' )
    else:
        print('(' , end=' ' )
        # NOTE(review): the original body recurses via the name
        # `print_optiomal_solution`, which is not defined under that name in
        # this file (the definitions are mangled) — confirm naming.
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(')' , end=' ' )
def a_ ( ) -> None:
    """Demo driver: solve the classic CLRS matrix-chain instance and print
    the minimum operation count plus the optimal parenthesization.

    NOTE(review): `matrix_chain_order`, `print_optiomal_solution` and
    `main` are not defined under those names in this file (the definitions
    above are mangled) — confirm naming. A stray dataset fragment fused to
    the last line has also been removed.
    """
    arr = [30, 35, 15, 5, 10, 20, 25]
    n = len(arr )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(arr )

    print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
    print_optiomal_solution(optimal_solution , 1 , n - 1 )


if __name__ == "__main__":
    main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Tokenizers are shared with T5; fall back to dummy objects when the optional
# sentencepiece / tokenizers backends are not installed.
# NOTE(review): identifiers in this module are mangled — `_UpperCamelCase` is
# rebound several times where the upstream file defines distinct names
# (MT5Tokenizer, MT5TokenizerFast, _import_structure, ...); confirm naming.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

_UpperCamelCase = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

_UpperCamelCase = TaTokenizerFast

# Lazy-import structure: maps submodule name -> public names it provides.
_UpperCamelCase = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}

# Register the torch models only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = [
        """MT5EncoderModel""",
        """MT5ForConditionalGeneration""",
        """MT5ForQuestionAnswering""",
        """MT5Model""",
        """MT5PreTrainedModel""",
        """MT5Stack""",
    ]

# Register the TensorFlow models only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]

# Register the Flax models only when Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]

# Static imports for type checkers only; at runtime the module is replaced by
# a _LazyModule proxy that resolves attributes on first access.
if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel

else:
    import sys

    # Replace this module in sys.modules with a lazy proxy so that the heavy
    # backend-specific submodules are imported on demand only.
    _UpperCamelCase = _LazyModule(
        __name__,
        globals()["""__file__"""],
        _import_structure,
        extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
        module_spec=__spec__,
    )
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCamelCase__ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__( UpperCAmelCase__ , UpperCAmelCase__ ):
    """Stores the (optionally learnable) embeddings used as the negative
    prompt for classifier-free guidance in VQ-Diffusion.

    NOTE(review): the `__init__` parameters all carry the same mangled name
    (a SyntaxError) and the body references `lowerCamelCase__`, which is
    never defined; upstream the parameters are `learnable: bool`,
    `hidden_size: Optional[int]` and `length: Optional[int]` — confirm.
    """

    @register_to_config
    def __init__( self :str , lowerCamelCase_ :bool , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[int] = None ) -> Optional[int]:
        """Create a zero-initialised (length x hidden_size) embedding table
        when learnable, otherwise keep no embedding parameters."""
        super().__init__()

        SCREAMING_SNAKE_CASE : Optional[int] = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            SCREAMING_SNAKE_CASE : str = torch.zeros(lowerCamelCase__ , lowerCamelCase__ )
        else:
            SCREAMING_SNAKE_CASE : str = None

        SCREAMING_SNAKE_CASE : str = torch.nn.Parameter(lowerCamelCase__ )
class lowercase__( UpperCAmelCase__ ):
    """Text-to-image generation pipeline for VQ-Diffusion.

    Combines a CLIP text encoder/tokenizer, a discrete-diffusion
    transformer, a VQ-Diffusion scheduler, a VQ-VAE decoder and learned
    classifier-free sampling embeddings.

    NOTE(review): each method signature below repeats a single mangled
    parameter name (invalid Python) while the bodies reference the intended
    upstream names; the upstream parameter lists are given in the method
    docstrings — confirm against the original diffusers source.
    """

    # component annotations (mangled; upstream these are typed attributes
    # for vqvae, text_encoder, tokenizer, transformer, scheduler and the
    # learned classifier-free sampling embeddings)
    UpperCamelCase = 42
    UpperCamelCase = 42
    UpperCamelCase = 42
    UpperCamelCase = 42
    UpperCamelCase = 42
    UpperCamelCase = 42

    def __init__( self :int , lowerCamelCase_ :VQModel , lowerCamelCase_ :CLIPTextModel , lowerCamelCase_ :CLIPTokenizer , lowerCamelCase_ :TransformeraDModel , lowerCamelCase_ :VQDiffusionScheduler , lowerCamelCase_ :LearnedClassifierFreeSamplingEmbeddings , ) -> Dict:
        """Register all sub-modules on the pipeline."""
        super().__init__()

        self.register_modules(
            vqvae=lowerCamelCase__ , transformer=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , scheduler=lowerCamelCase__ , learned_classifier_free_sampling_embeddings=lowerCamelCase__ , )

    def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ) -> int:
        """Encode the prompt into CLIP hidden states, optionally prepending
        the classifier-free-guidance negative embeddings.

        Upstream parameters: (prompt, num_images_per_prompt,
        do_classifier_free_guidance)."""
        SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else 1

        # get prompt text embeddings
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(
            lowerCamelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )

        SCREAMING_SNAKE_CASE : Union[str, Any] = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # warn about (and drop) the tail of the prompt that CLIP cannot encode
            SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            SCREAMING_SNAKE_CASE : List[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
        SCREAMING_SNAKE_CASE : int = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        SCREAMING_SNAKE_CASE : Union[str, Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowerCamelCase__ )

        # duplicate text embeddings for each generation per prompt
        SCREAMING_SNAKE_CASE : Tuple = prompt_embeds.repeat_interleave(lowerCamelCase__ , dim=0 )

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                SCREAMING_SNAKE_CASE : List[str] = self.learned_classifier_free_sampling_embeddings.embeddings
                SCREAMING_SNAKE_CASE : Union[str, Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(lowerCamelCase__ , 1 , 1 )
            else:
                # fall back to encoding empty strings as the negative prompt
                SCREAMING_SNAKE_CASE : Optional[int] = [""] * batch_size

                SCREAMING_SNAKE_CASE : List[Any] = text_input_ids.shape[-1]
                SCREAMING_SNAKE_CASE : Dict = self.tokenizer(
                    lowerCamelCase__ , padding='''max_length''' , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' , )
                SCREAMING_SNAKE_CASE : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                SCREAMING_SNAKE_CASE : Any = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowerCamelCase__ )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            SCREAMING_SNAKE_CASE : Optional[Any] = negative_prompt_embeds.shape[1]
            SCREAMING_SNAKE_CASE : str = negative_prompt_embeds.repeat(1 , lowerCamelCase__ , 1 )
            SCREAMING_SNAKE_CASE : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCamelCase__ , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            SCREAMING_SNAKE_CASE : str = torch.cat([negative_prompt_embeds, prompt_embeds] )

        return prompt_embeds

    @torch.no_grad()
    def __call__( self :Tuple , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 1_00 , lowerCamelCase_ :float = 5.0 , lowerCamelCase_ :float = 1.0 , lowerCamelCase_ :int = 1 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the full denoising loop and decode the latents to images.

        Upstream parameters: (prompt, num_inference_steps, guidance_scale,
        truncation_rate, num_images_per_prompt, generator, latents,
        output_type, return_dict, callback, callback_steps)."""
        if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
            SCREAMING_SNAKE_CASE : Tuple = 1
        elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
            SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase__ )
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase__ )}" )

        SCREAMING_SNAKE_CASE : List[str] = batch_size * num_images_per_prompt

        # classifier-free guidance is only active when the scale exceeds 1
        SCREAMING_SNAKE_CASE : Optional[Any] = guidance_scale > 1.0

        SCREAMING_SNAKE_CASE : Union[str, Any] = self._encode_prompt(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(lowerCamelCase__ )}." )

        # get the initial completely masked latents unless the user supplied it
        SCREAMING_SNAKE_CASE : int = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            SCREAMING_SNAKE_CASE : Optional[int] = self.transformer.num_vector_embeds - 1
            SCREAMING_SNAKE_CASE : Optional[int] = torch.full(lowerCamelCase__ , lowerCamelCase__ ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
            SCREAMING_SNAKE_CASE : Optional[Any] = latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(lowerCamelCase__ , device=self.device )

        SCREAMING_SNAKE_CASE : str = self.scheduler.timesteps.to(self.device )

        SCREAMING_SNAKE_CASE : Any = latents

        for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ):
            # expand the sample if we are doing classifier free guidance
            SCREAMING_SNAKE_CASE : Dict = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            SCREAMING_SNAKE_CASE : Any = self.transformer(lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , timestep=lowerCamelCase__ ).sample

            if do_classifier_free_guidance:
                SCREAMING_SNAKE_CASE : int = model_output.chunk(2 )
                SCREAMING_SNAKE_CASE : List[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(lowerCamelCase__ , dim=1 , keepdim=lowerCamelCase__ )

            SCREAMING_SNAKE_CASE : Dict = self.truncate(lowerCamelCase__ , lowerCamelCase__ )

            # remove `log(0)`'s (`-inf`s)
            SCREAMING_SNAKE_CASE : Optional[int] = model_output.clamp(-70 )

            # compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE : Any = self.scheduler.step(lowerCamelCase__ , timestep=lowerCamelCase__ , sample=lowerCamelCase__ , generator=lowerCamelCase__ ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

        # decode the final discrete latents through the VQ-VAE into pixels
        SCREAMING_SNAKE_CASE : Any = self.vqvae.config.vq_embed_dim
        SCREAMING_SNAKE_CASE : Dict = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        SCREAMING_SNAKE_CASE : Optional[int] = self.vqvae.quantize.get_codebook_entry(lowerCamelCase__ , shape=lowerCamelCase__ )
        SCREAMING_SNAKE_CASE : int = self.vqvae.decode(lowerCamelCase__ , force_not_quantize=lowerCamelCase__ ).sample

        # rescale from [-1, 1] to [0, 1] and move to numpy HWC layout
        SCREAMING_SNAKE_CASE : str = (image / 2 + 0.5).clamp(0 , 1 )
        SCREAMING_SNAKE_CASE : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            SCREAMING_SNAKE_CASE : List[str] = self.numpy_to_pil(lowerCamelCase__ )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=lowerCamelCase__ )

    def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :float ) -> torch.FloatTensor:
        """Zero out (in log space) the lowest-probability classes so that at
        least `truncation_rate` of the probability mass is kept per pixel.

        Upstream parameters: (log_p_x_0, truncation_rate)."""
        SCREAMING_SNAKE_CASE : Tuple = torch.sort(lowerCamelCase__ , 1 , descending=lowerCamelCase__ )
        SCREAMING_SNAKE_CASE : Any = torch.exp(lowerCamelCase__ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        SCREAMING_SNAKE_CASE : List[str] = torch.full_like(keep_mask[:, 0:1, :] , lowerCamelCase__ )
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat((all_true, keep_mask) , dim=1 )
        SCREAMING_SNAKE_CASE : str = keep_mask[:, :-1, :]

        # map the sorted-order mask back to the original class order
        SCREAMING_SNAKE_CASE : str = keep_mask.gather(1 , indices.argsort(1 ) )

        SCREAMING_SNAKE_CASE : int = log_p_x_0.clone()

        SCREAMING_SNAKE_CASE : int = -torch.inf  # -inf = log(0)

        return rv
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
    """Configuration class for the MaskFormer Swin backbone.

    NOTE(review): every `__init__` parameter repeats one mangled name (a
    SyntaxError) while the body assigns from the intended upstream names
    (image_size, patch_size, num_channels, embed_dim, depths, num_heads,
    window_size, mlp_ratio, qkv_bias, dropout probabilities, drop_path_rate,
    hidden_act, use_absolute_embeddings, initializer_range, layer_norm_eps,
    out_features, out_indices) — confirm against the upstream config.
    """

    # model_type identifier used by AutoConfig
    UpperCamelCase = """maskformer-swin"""

    # map generic config attribute names onto this model's attribute names
    UpperCamelCase = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self :Optional[int] , lowerCamelCase_ :List[Any]=2_24 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :List[str]=96 , lowerCamelCase_ :int=[2, 2, 6, 2] , lowerCamelCase_ :Union[str, Any]=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=4.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Any=1E-5 , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[str]=None , **lowerCamelCase_ :Union[str, Any] , ) -> Dict:
        """Store backbone hyper-parameters and derive stage metadata."""
        super().__init__(**lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : int = image_size
        SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
        SCREAMING_SNAKE_CASE : str = num_channels
        SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
        SCREAMING_SNAKE_CASE : List[Any] = depths
        SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : str = num_heads
        SCREAMING_SNAKE_CASE : Any = window_size
        SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
        SCREAMING_SNAKE_CASE : str = qkv_bias
        SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : int = drop_path_rate
        SCREAMING_SNAKE_CASE : Tuple = hidden_act
        SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
        SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
        SCREAMING_SNAKE_CASE : List[str] = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
        SCREAMING_SNAKE_CASE : Dict = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = get_aligned_output_features_output_indices(
            out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[Any] = logging.get_logger(__name__)
a__ : Dict = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class __snake_case ( __magic_name__ , __magic_name__ ):
    """Configuration class for BiT (Big Transfer) models.

    NOTE(review): every `__init__` parameter repeats one mangled name (a
    SyntaxError) while the body assigns from the intended upstream names
    (num_channels, embedding_size, hidden_sizes, depths, layer_type,
    hidden_act, global_padding, num_groups, drop_path_rate,
    embedding_dynamic_padding, output_stride, width_factor, out_features,
    out_indices) — confirm against the upstream config.
    """

    # model_type identifier used by AutoConfig
    __lowerCAmelCase = '''bit'''
    # supported residual layer variants
    __lowerCAmelCase = ['''preactivation''', '''bottleneck''']
    # supported global padding strategies
    __lowerCAmelCase = ['''SAME''', '''VALID''']

    def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=64 , UpperCamelCase_=[256, 512, 1024, 2048] , UpperCamelCase_=[3, 4, 6, 3] , UpperCamelCase_="preactivation" , UpperCamelCase_="relu" , UpperCamelCase_=None , UpperCamelCase_=32 , UpperCamelCase_=0.0 , UpperCamelCase_=False , UpperCamelCase_=32 , UpperCamelCase_=1 , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ) -> Dict:
        """Store BiT hyper-parameters, validating layer type and padding."""
        super().__init__(**UpperCamelCase_ )
        if layer_type not in self.layer_types:
            raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
        if global_padding is not None:
            # padding strategy names are normalised to upper case
            if global_padding.upper() in self.supported_padding:
                snake_case__ = global_padding.upper()
            else:
                raise ValueError(F'''Padding strategy {global_padding} not supported''' )
        snake_case__ = num_channels
        snake_case__ = embedding_size
        snake_case__ = hidden_sizes
        snake_case__ = depths
        snake_case__ = layer_type
        snake_case__ = hidden_act
        snake_case__ = global_padding
        snake_case__ = num_groups
        snake_case__ = drop_path_rate
        snake_case__ = embedding_dynamic_padding
        snake_case__ = output_stride
        snake_case__ = width_factor

        snake_case__ = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
        snake_case__ , snake_case__ = get_aligned_output_features_output_indices(
            out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( torch_layer , weight , bias=None ) -> None:
    """Copy `weight` (and optionally `bias`) into `torch_layer` as
    ``nn.Parameter``s, asserting that the shapes match.

    The original signature repeated one mangled parameter name (a
    SyntaxError); the body already used `torch_layer`/`weight`/`bias`, so
    the parameters are renamed to match the body.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias )
def __lowerCamelCase ( weights , torch_layer , hidden_size ) -> None:
    """Load trax LSH self-attention weights (query_key, value, output dense)
    into the corresponding torch attention layer for 1-to-1 comparison.

    The original signature repeated one mangled parameter name; the body
    already referenced `weights`/`torch_layer`, so the parameters are
    restored accordingly.
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )

    # NOTE(review): `set_param` is the shape-checked copy helper defined
    # above (its def name is mangled in this file) — confirm naming.
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( weights , torch_layer , hidden_size ) -> None:
    """Load trax local self-attention weights (query, key, value, output
    dense) into the corresponding torch attention layer.

    The original signature repeated one mangled parameter name; the body
    already referenced `weights`/`torch_layer`, so the parameters are
    restored accordingly.
    """
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )

    # NOTE(review): `set_param` is the shape-checked copy helper defined
    # above (its def name is mangled in this file) — confirm naming.
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( weights , torch_block , hidden_size ) -> None:
    """Load the trax weights of one Reformer block (attention layer norm,
    attention, feed-forward layer norm and feed-forward) into `torch_block`.

    The original signature repeated one mangled parameter name; the body
    already referenced `weights`, so the parameters are restored.
    """
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )

    # lsh weights + output
    attn_weights = weights[0][1]
    # LSH attention stores 3 weight groups, local attention stores 4
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def __lowerCamelCase ( weights , torch_model , hidden_size ) -> None:
    """Load a complete trax Reformer checkpoint (embeddings, all encoder
    blocks, final layer norm and LM head) into `torch_model`.

    The original signature repeated one mangled parameter name; the body
    already referenced `weights`/`torch_model`, so the parameters are
    restored accordingly.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )

    # upstream checks for axial position embeddings, stored as a tuple
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'''{position_embeddings[emb_idx]} emb does not match'''
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    # each torch block consumes four consecutive trax weight groups
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def __lowerCamelCase ( trax_model_pkl_path , config_file , pytorch_dump_path ) -> None:
    """End-to-end conversion: build a Reformer LM from `config_file`, load
    the pickled trax weights from `trax_model_pkl_path` and save the state
    dict to `pytorch_dump_path`.

    The original signature repeated one mangled parameter name; the body
    already referenced the intended names, so they are restored.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = ReformerModelWithLMHead(config )

    with open(trax_model_pkl_path , 'rb' ) as f:
        model_weights = pickle.load(f )['weights']

    # NOTE(review): `set_model_weights_in_torch` is the loader defined above
    # (its def name is mangled in this file) — confirm naming.
    set_model_weights_in_torch(model_weights , model , config.hidden_size )

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # Command-line entry point for the trax -> PyTorch Reformer conversion.
    # The parser was originally bound to the mangled name `a__` while the
    # add_argument calls referenced `parser`; a stray dataset fragment fused
    # to the final line has also been removed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained Reformer model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    # NOTE(review): the conversion function defined above has a mangled name;
    # `convert_trax_checkpoint_to_pytorch` is its upstream name — confirm.
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__lowerCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
__lowerCAmelCase = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__lowerCAmelCase = {"unk_token": "<unk>"}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
__lowerCAmelCase = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__lowerCAmelCase = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__a , __a )
def snake_case ( self , **__a ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self , **__a ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self , **__a ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = CLIPProcessor(tokenizer=__a , image_processor=__a )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
__lowerCAmelCase = CLIPProcessor(tokenizer=__a , image_processor=__a )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __a )
self.assertIsInstance(processor_fast.tokenizer , __a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __a )
self.assertIsInstance(processor_fast.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowerCAmelCase = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__lowerCAmelCase = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = CLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__a , return_tensors="np" )
__lowerCAmelCase = processor(images=__a , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = CLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = processor(text=__a )
__lowerCAmelCase = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = CLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = CLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(__a )
__lowerCAmelCase = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = CLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 282 |
"""simple docstring"""
import string
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = ""
for i in sequence:
__lowerCAmelCase = ord(_UpperCamelCase )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = string.ascii_letters
__lowerCAmelCase = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(_UpperCamelCase )] if c in letters else c for c in sequence )
def _lowerCamelCase ( ):
'''simple docstring'''
from timeit import timeit
print("Running performance benchmarks..." )
__lowerCAmelCase = "from string import printable ; from __main__ import atbash, atbash_slow"
print(f"> atbash_slow(): {timeit('atbash_slow(printable)' , setup=_UpperCamelCase )} seconds" )
print(f"> atbash(): {timeit('atbash(printable)' , setup=_UpperCamelCase )} seconds" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 282 | 1 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
A : List[Any] = 'naver-clova-ix/donut-base'
class lowerCAmelCase_ ( unittest.TestCase ):
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
snake_case : int =DonutProcessor.from_pretrained(__a )
def __snake_case ( self : Dict ):
'''simple docstring'''
snake_case : Optional[Any] ={
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
snake_case : Tuple =(
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
snake_case : Any =self.processor.tokenajson(__a )
self.assertDictEqual(__a, __a )
| 349 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCamelCase__ :
"""simple docstring"""
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ):
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = seq_length
lowerCamelCase = is_training
lowerCamelCase = use_token_type_ids
lowerCamelCase = use_labels
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = type_vocab_size
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = num_labels
lowerCamelCase = num_choices
lowerCamelCase = scope
lowerCamelCase = self.vocab_size - 1
def _a (self ):
'''simple docstring'''
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase = None
if self.use_token_type_ids:
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a (self , __a , __a , __a , __a , *__a ):
'''simple docstring'''
lowerCamelCase = OpenAIGPTModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase = model(__a , token_type_ids=__a , head_mask=__a )
lowerCamelCase = model(__a , token_type_ids=__a )
lowerCamelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , __a , __a , __a , __a , *__a ):
'''simple docstring'''
lowerCamelCase = OpenAIGPTLMHeadModel(__a )
model.to(__a )
model.eval()
lowerCamelCase = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a (self , __a , __a , __a , __a , *__a ):
'''simple docstring'''
lowerCamelCase = OpenAIGPTDoubleHeadsModel(__a )
model.to(__a )
model.eval()
lowerCamelCase = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a (self , __a , __a , __a , __a , *__a ):
'''simple docstring'''
lowerCamelCase = self.num_labels
lowerCamelCase = OpenAIGPTForSequenceClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.prepare_config_and_inputs()
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) = config_and_inputs
lowerCamelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
"""simple docstring"""
_A = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_A = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_A = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a (self , __a , __a , __a , __a , __a ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a (self , __a , __a , __a=False ):
'''simple docstring'''
lowerCamelCase = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__a , )
lowerCamelCase = inputs_dict["labels"]
lowerCamelCase = inputs_dict["labels"]
lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__a , )
lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
return inputs_dict
def _a (self ):
'''simple docstring'''
lowerCamelCase = OpenAIGPTModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=__a , n_embd=37 )
def _a (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__a )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__a )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__a )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__a )
@slow
def _a (self ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = OpenAIGPTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@slow
def _a (self ):
'''simple docstring'''
lowerCamelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(__a )
lowerCamelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=__a ) # the president is
lowerCamelCase = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase = model.generate(__a , do_sample=__a )
self.assertListEqual(output_ids[0].tolist() , __a ) | 623 | 0 |
'''simple docstring'''
from typing import Any
def lowercase (_A ):
"""simple docstring"""
if not input_list:
return []
_lowerCAmelCase : Optional[int] = [input_list.count(_A ) for value in input_list]
_lowerCAmelCase : int = max(_A ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(_A ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 630 |
'''simple docstring'''
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Dict = len(_A )
while cur > 1:
# Find the maximum number in arr
_lowerCAmelCase : int = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
_lowerCAmelCase : Dict = arr[mi::-1] + arr[mi + 1 : len(_A )]
# Reverse whole list
_lowerCAmelCase : Optional[int] = arr[cur - 1 :: -1] + arr[cur : len(_A )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCAmelCase : Tuple = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase : Tuple = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 630 | 1 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
lowerCamelCase_ = str(__UpperCamelCase )
return len(__UpperCamelCase ) == 9 and set(__UpperCamelCase ) == set('123456789' )
def _UpperCamelCase ( ) -> int | None:
for base_num in range(99_99 ,49_99 ,-1 ):
lowerCamelCase_ = 10_00_02 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
for base_num in range(3_33 ,99 ,-1 ):
lowerCamelCase_ = 1_00_20_03 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 |
from itertools import permutations
def lowerCAmelCase__ ( _a : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
snake_case_ : List[str] = [7, 11, 13, 17]
for i, test in enumerate(_a ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowerCAmelCase__ ( _a : int = 10 ):
return sum(
int("".join(map(_a , _a ) ) )
for num in permutations(range(_a ) )
if is_substring_divisible(_a ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 568 | 0 |
def __lowerCamelCase ( ) -> list[list[int]]:
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
_snake_case = generate_large_matrix()
_snake_case = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __lowerCamelCase ( _lowercase ) -> None:
assert all(row == sorted(_lowercase , reverse=_lowercase ) for row in grid )
assert all(list(_lowercase ) == sorted(_lowercase , reverse=_lowercase ) for col in zip(*_lowercase ) )
def __lowerCamelCase ( _lowercase ) -> int:
UpperCamelCase = 0
UpperCamelCase = len(_lowercase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCamelCase = (left + right) // 2
UpperCamelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCamelCase = mid + 1
else:
UpperCamelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_lowercase )
def __lowerCamelCase ( _lowercase ) -> int:
UpperCamelCase = 0
UpperCamelCase = len(grid[0] )
for i in range(len(_lowercase ) ):
UpperCamelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(_lowercase ) * len(grid[0] )) - total
def __lowerCamelCase ( _lowercase ) -> int:
return len([number for row in grid for number in row if number < 0] )
def __lowerCamelCase ( _lowercase ) -> int:
UpperCamelCase = 0
for row in grid:
for i, number in enumerate(_lowercase ):
if number < 0:
total += len(_lowercase ) - i
break
return total
def __lowerCamelCase ( ) -> None:
from timeit import timeit
print('Running benchmarks' )
UpperCamelCase = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
UpperCamelCase = timeit(F'{func}(grid=grid)' , setup=_lowercase , number=500 )
print(F'{func}() took {time:0.4f} seconds' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 709 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] =DanceDiffusionPipeline
SCREAMING_SNAKE_CASE_ : str =UNCONDITIONAL_AUDIO_GENERATION_PARAMS
SCREAMING_SNAKE_CASE_ : int =PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
SCREAMING_SNAKE_CASE_ : Optional[int] =UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ : Optional[int] =False
SCREAMING_SNAKE_CASE_ : Union[str, Any] =False
def __lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=SCREAMING_SNAKE_CASE__ , use_timestep_embedding=SCREAMING_SNAKE_CASE__ , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
UpperCamelCase = IPNDMScheduler()
UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=0 ):
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCAmelCase ( self : Dict ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = DanceDiffusionPipeline(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = pipe(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCamelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def __lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : str ):
"""simple docstring"""
UpperCamelCase = torch_device
UpperCamelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = torch_device
UpperCamelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 170 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class lowercase__ ( A_ ):
__UpperCAmelCase = '''funnel'''
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self , SCREAMING_SNAKE_CASE=3_0522 , SCREAMING_SNAKE_CASE=[4, 4, 4] , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1e-9 , SCREAMING_SNAKE_CASE="mean" , SCREAMING_SNAKE_CASE="relative_shift" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Any = block_sizes
_lowerCamelCase : int = [1] * len(SCREAMING_SNAKE_CASE) if block_repeats is None else block_repeats
assert len(SCREAMING_SNAKE_CASE) == len(
self.block_repeats), "`block_sizes` and `block_repeats` should have the same length."
_lowerCamelCase : Tuple = num_decoder_layers
_lowerCamelCase : Dict = d_model
_lowerCamelCase : List[Any] = n_head
_lowerCamelCase : Dict = d_head
_lowerCamelCase : List[str] = d_inner
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : int = hidden_dropout
_lowerCamelCase : Any = attention_dropout
_lowerCamelCase : int = activation_dropout
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : str = initializer_std
_lowerCamelCase : Any = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'
_lowerCamelCase : List[str] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'
_lowerCamelCase : int = attention_type
_lowerCamelCase : List[str] = separate_cls
_lowerCamelCase : Tuple = truncate_seq
_lowerCamelCase : List[Any] = pool_q_only
super().__init__(**SCREAMING_SNAKE_CASE)
@property
def UpperCamelCase_ ( self) -> Union[str, Any]:
return sum(self.block_sizes)
@num_hidden_layers.setter
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]:
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""")
@property
def UpperCamelCase_ ( self) -> List[Any]:
return len(self.block_sizes)
@num_blocks.setter
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Optional[Any]:
raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""")
| 88 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _snake_case ( ):
"""simple docstring"""
_lowerCamelCase : Any = HfArgumentParser(__snake_case )
_lowerCamelCase : int = parser.parse_args_into_dataclasses()[0]
_lowerCamelCase : Dict = TensorFlowBenchmark(args=__snake_case )
try:
_lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_lowerCamelCase : Union[str, Any] = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
_lowerCamelCase : List[str] = """ """.join(str(__snake_case ).split(""" """ )[:-1] )
_lowerCamelCase : Dict = """"""
_lowerCamelCase : List[Any] = eval(str(__snake_case ).split(""" """ )[-1] )
_lowerCamelCase : Tuple = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__snake_case )
if len(__snake_case ) > 0:
_lowerCamelCase : Tuple = full_error_msg + begin_error_msg + str(__snake_case )
raise ValueError(__snake_case )
benchmark.run()
if __name__ == "__main__":
main()
| 88 | 1 |
from __future__ import annotations
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
if not nums:
return 0
snake_case_ = nums[0]
snake_case_ = 0
for num in nums[1:]:
snake_case_ , snake_case_ = (
max_excluding + num,
max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ),
)
return max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod() | 701 |
import numpy as np
import datasets
# Module-level documentation strings for the metric.
#
# BUG FIX: the original bound all three strings to the single name
# ``UpperCAmelCase``, so the first two assignments were shadowed and the
# ``add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)`` decorator on the
# Metric class below referenced undefined names (NameError at import time).
# The constants are restored under the names the decorator actually uses.

# Human-readable summary of what the metric computes.
_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

# BibTeX citation for the metric's origin.
_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

# Argument/return documentation injected into the Metric class docstring.
_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""

# Backward-compatible alias: in the original file only the LAST assignment to
# ``UpperCAmelCase`` survived, i.e. the kwargs description.  Keep that final
# binding available under the old name in case anything else references it.
UpperCAmelCase = _KWARGS_DESCRIPTION
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
# convert to numpy arrays
snake_case_ = np.array(_UpperCAmelCase )
snake_case_ = np.array(_UpperCAmelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
snake_case_ = X - np.mean(_UpperCAmelCase )
snake_case_ = np.cov(reference_distribution.T )
try:
snake_case_ = np.linalg.inv(_UpperCAmelCase )
except np.linalg.LinAlgError:
snake_case_ = np.linalg.pinv(_UpperCAmelCase )
snake_case_ = np.dot(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = np.dot(_UpperCAmelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 531 | 0 |
'''simple docstring'''
import numpy as np
import datasets
UpperCamelCase_ = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
UpperCamelCase_ = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
UpperCamelCase_ = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'X': datasets.Sequence(datasets.Value('float', id='sequence' ), id='X' ),
} ), )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = np.array(A )
SCREAMING_SNAKE_CASE : Tuple = np.array(A )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('Expected `X` to be a 2D vector' )
if len(reference_distribution.shape ) != 2:
raise ValueError('Expected `reference_distribution` to be a 2D vector' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' )
# Get mahalanobis distance for each prediction
SCREAMING_SNAKE_CASE : Any = X - np.mean(A )
SCREAMING_SNAKE_CASE : str = np.cov(reference_distribution.T )
try:
SCREAMING_SNAKE_CASE : str = np.linalg.inv(A )
except np.linalg.LinAlgError:
SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.pinv(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.dot(A, A )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.dot(A, X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 28 | """simple docstring"""
import torch
from diffusers import DiffusionPipeline
class _snake_case ( __snake_case ):
"""simple docstring"""
def __init__( self : List[Any] , _A : Dict , _A : Dict):
"""simple docstring"""
super().__init__()
self.register_modules(unet=_A , scheduler=_A)
def __call__( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1
_SCREAMING_SNAKE_CASE : int = self.unet(_A , _A).sample
_SCREAMING_SNAKE_CASE : List[str] = self.scheduler.step(_A , _A , _A).prev_sample
_SCREAMING_SNAKE_CASE : str = scheduler_output - scheduler_output + torch.ones_like(_A)
return result
| 338 | 0 |
'''simple docstring'''
def _A ( _lowerCAmelCase = 1_000 ):
"""simple docstring"""
__lowercase =2**power
__lowercase =0
while n:
__lowercase , __lowercase =r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 454 |
'''simple docstring'''
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict):
'''simple docstring'''
__lowercase =(0, 0)
__lowercase =None
__lowercase =0
__lowercase =0
__lowercase =0
def __eq__( self : List[str] , _lowerCAmelCase : Optional[Any]):
'''simple docstring'''
return self.position == cell.position
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
print(self.position)
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : List[str]=(5, 5)):
'''simple docstring'''
__lowercase =np.zeros(_lowerCAmelCase)
__lowercase =world_size[0]
__lowercase =world_size[1]
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
print(self.w)
def __lowerCamelCase ( self : int , _lowerCAmelCase : List[str]):
'''simple docstring'''
__lowercase =[
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__lowercase =cell.position[0]
__lowercase =cell.position[1]
__lowercase =[]
for n in neughbour_cord:
__lowercase =current_x + n[0]
__lowercase =current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__lowercase =Cell()
__lowercase =(x, y)
__lowercase =cell
neighbours.append(_lowerCAmelCase)
return neighbours
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =[]
__lowercase =[]
_open.append(_lowerCAmelCase )
while _open:
__lowercase =np.argmin([n.f for n in _open] )
__lowercase =_open[min_f]
_closed.append(_open.pop(_lowerCAmelCase ) )
if current == goal:
break
for n in world.get_neigbours(_lowerCAmelCase ):
for c in _closed:
if c == n:
continue
__lowercase =current.g + 1
__lowercase , __lowercase =n.position
__lowercase , __lowercase =goal.position
__lowercase =(ya - ya) ** 2 + (xa - xa) ** 2
__lowercase =n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_lowerCAmelCase )
__lowercase =[]
while current.parent is not None:
path.append(current.position )
__lowercase =current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
lowerCamelCase = Gridworld()
# Start position and goal
lowerCamelCase = Cell()
lowerCamelCase = (0, 0)
lowerCamelCase = Cell()
lowerCamelCase = (4, 4)
print(f"path from {start.position} to {goal.position}")
lowerCamelCase = astar(world, start, goal)
# Just for visual reasons.
for i in s:
lowerCamelCase = 1
print(world.w)
| 454 | 1 |
UpperCAmelCase_ : List[str] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# Return True if there is node that has not iterated.
__magic_name__ : Any =[False] * len(lowerCamelCase )
__magic_name__ : Any =[s]
__magic_name__ : Tuple =True
while queue:
__magic_name__ : Any =queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowerCamelCase )
__magic_name__ : Tuple =True
__magic_name__ : Optional[Any] =u
return visited[t]
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : str =[-1] * (len(lowerCamelCase ))
__magic_name__ : str =0
__magic_name__ : Any =[]
__magic_name__ : Dict =[i[:] for i in graph] # Record original cut, copy.
while bfs(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[str] =float("""Inf""" )
__magic_name__ : List[Any] =sink
while s != source:
# Find the minimum value in select path
__magic_name__ : Dict =min(lowerCamelCase , graph[parent[s]][s] )
__magic_name__ : int =parent[s]
max_flow += path_flow
__magic_name__ : Optional[Any] =sink
while v != source:
__magic_name__ : List[Any] =parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__magic_name__ : List[Any] =parent[v]
for i in range(len(lowerCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 21 |
"""simple docstring"""
def __lowerCamelCase ( a_ : int ) -> int:
__SCREAMING_SNAKE_CASE :Optional[Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def __lowerCamelCase ( a_ : int ) -> int:
__SCREAMING_SNAKE_CASE :Union[str, Any] = 0
while number > 0:
__SCREAMING_SNAKE_CASE :str = number % 10
sum_of_digits += last_digit
__SCREAMING_SNAKE_CASE :Tuple = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def __lowerCamelCase ( a_ : int = 1_00 ) -> int:
__SCREAMING_SNAKE_CASE :Union[str, Any] = factorial(a_ )
__SCREAMING_SNAKE_CASE :Optional[int] = split_and_add(a_ )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip()))) | 498 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__A = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__A = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
__A = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def lowerCamelCase__ ( self : Tuple ):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def lowerCamelCase__ ( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : int = CHRF.CHAR_ORDER , UpperCAmelCase : int = CHRF.WORD_ORDER , UpperCAmelCase : int = CHRF.BETA , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , ):
__lowerCamelCase : Any = len(references[0] )
if any(len(UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
__lowerCamelCase : Dict = [[refs[i] for refs in references] for i in range(UpperCAmelCase )]
__lowerCamelCase : List[str] = CHRF(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Any = sb_chrf.corpus_score(UpperCAmelCase , UpperCAmelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
} | 700 | """simple docstring"""
import math
from datetime import datetime, timedelta
def lowercase_ ( _lowerCamelCase: int ) -> datetime:
'''simple docstring'''
__lowerCamelCase : List[Any] = year % 19
__lowerCamelCase : List[str] = year % 4
__lowerCamelCase : Dict = year % 7
__lowerCamelCase : Optional[Any] = math.floor(year / 100 )
__lowerCamelCase : Tuple = math.floor((13 + 8 * leap_day_inhibits) / 25 )
__lowerCamelCase : List[str] = leap_day_inhibits / 4
__lowerCamelCase : Optional[Any] = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
__lowerCamelCase : Union[str, Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowerCamelCase : Union[str, Any] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
__lowerCamelCase : List[Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(_lowerCamelCase , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(_lowerCamelCase , 4 , 18 )
else:
return datetime(_lowerCamelCase , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
__A = '''will be''' if year > datetime.now().year else '''was'''
print(F"""Easter in {year} {tense} {gauss_easter(year)}""") | 366 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
# Module-level logger; the zip-error path in the artifact parser below logs through it.
logger = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( artifact_path , targets ) -> set:
    """Extract warning messages matching ``targets`` from one downloaded CI artifact.

    Args:
        artifact_path: When the module-level flag ``from_gh`` is truthy, a directory
            containing a ``warnings.txt``; otherwise a ``.zip`` file containing one.
        targets: Iterable of warning class names (e.g. ``"DeprecationWarning"``) to keep.

    Returns:
        A set of multi-line warning strings whose header mentions one of ``targets``.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # pytest prints each warning as an unindented header line followed by
        # indented body lines; accumulate indented lines in `buffer` and flush
        # the collected warning on the next unindented line.
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode('''UTF-8''' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(''' ''' ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '''\n'''.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f""": {x}: """ in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                    continue
            else:
                line = line.strip()
                buffer.append(line )

    if from_gh:  # NOTE(review): `from_gh` is a module-level flag set in the __main__ block
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # only the warnings report is of interest
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # only the warnings report is of interest
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
    return selected_warnings


# The aggregator below calls this helper under its descriptive name; expose it so the call resolves.
extract_warnings_from_single_artifact = SCREAMING_SNAKE_CASE
def SCREAMING_SNAKE_CASE ( artifact_dir , targets ) -> set:
    """Aggregate matching warnings from every artifact under ``artifact_dir``.

    Args:
        artifact_dir: Directory containing the downloaded artifacts (``.zip`` files,
            or already-extracted directories when running from a GitHub workflow).
        targets: Iterable of warning class names to keep.

    Returns:
        The union of the warnings extracted from each artifact.
    """
    selected_warnings = set()
    # When running from GH every entry is a directory; otherwise only .zip downloads matter.
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('''.zip''' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings


# Descriptive alias used by the __main__ block below.
extract_warnings = SCREAMING_SNAKE_CASE
if __name__ == "__main__":

    def list_str(values ):
        """argparse ``type=`` helper: split a comma-separated string into a list."""
        return values.split(''',''' )

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    parser.add_argument(
        """--output_dir""",
        type=str,
        required=True,
        help="""Where to store the downloaded artifacts and other result files.""",
    )
    parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
    # optional parameters
    parser.add_argument(
        """--targets""",
        default="""DeprecationWarning,UserWarning,FutureWarning""",
        type=list_str,
        help="""Comma-separated list of target warning(s) which we want to extract.""",
    )
    parser.add_argument(
        """--from_gh""",
        action="""store_true""",
        help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
    )

    args = parser.parse_args()
    # Module-level flag read by the extraction helpers above.
    from_gh = args.from_gh

    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("""=""" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 87 |
import copy
import random
from transformers import CLIPTokenizer
class UpperCAmelCase_ ( CLIPTokenizer ):
    """CLIP tokenizer supporting multi-vector textual-inversion placeholder tokens.

    A placeholder token (e.g. ``"<cat-toy>"``) can be mapped to several real
    vocabulary tokens (``"<cat-toy>_0" ... "<cat-toy>_{n-1}"``). ``token_map``
    stores that mapping and all incoming text is rewritten through it before
    normal CLIP tokenization.
    """

    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        # Maps each placeholder token to the list of real tokens it expands to.
        self.token_map = {}

    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        """Add ``placeholder_token`` to the vocabulary, raising if it already exists."""
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                " `placeholder_token` that is not already in the tokenizer." )

    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        """Register ``placeholder_token`` as ``num_vec_per_token`` new vocabulary tokens."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + F"""_{i}"""
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F"""The tokenizer already has placeholder token {token} that can get confused with"""
                    F""" {placeholder_token}keep placeholder tokens independent""" )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        """Expand every registered placeholder token occurring in ``text``.

        ``prop_tokens_to_load`` keeps only a leading fraction of each expansion;
        ``vector_shuffle`` randomizes the order of the expansion tokens.
        """
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    # Shuffle a copy so the stored mapping keeps its order.
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , " ".join(tokens ) )
        return text

    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        """Tokenize ``text`` after expanding placeholder tokens."""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )

    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        """Encode ``text`` to token ids after expanding placeholder tokens."""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
| 340 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module logger; used throughout the training entry point below.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

# Config classes supported by the image-classification auto model, and the
# corresponding model-type strings (used for the --model_type help text).
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def UpperCAmelCase_ ( lowerCAmelCase_ ):
    """Load the image file at path ``lowerCAmelCase_`` and return it as an RGB PIL image."""
    with open(lowerCAmelCase_ , "rb" ) as f:
        im = Image.open(f )
        # Convert while the file handle is still open: PIL loads lazily.
        return im.convert("RGB" )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self) -> None:
        # Fail fast: the script needs either a hub dataset name or local image folders.
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image-processor we are going to fine-tune from."""

    model_name_or_path: str = field(
        default='google/vit-base-patch16-224-in21k',
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={
            'help': 'If training from scratch, pass a model type from the list: '
            + ', '.join(conf.model_type for conf in MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'}
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},
    )
def UpperCAmelCase_ ( lowerCAmelCase_ ):
    """Collate a list of dataset examples into a training batch.

    Args:
        lowerCAmelCase_: list of dicts, each holding a ``"pixel_values"`` tensor
            and an integer ``"labels"`` entry.

    Returns:
        Dict with a stacked ``pixel_values`` tensor (batch first) and a 1-D
        ``labels`` tensor.
    """
    pixel_values = torch.stack([example["pixel_values"] for example in lowerCAmelCase_] )
    labels = torch.tensor([example["labels"] for example in lowerCAmelCase_] )
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    """Entry point: parse arguments, load data and model, then train/evaluate.

    Reconstructed from the obfuscated source, in which nearly every assignment
    target had been renamed to ``lowercase`` (so later references such as
    ``parser``, ``dataset``, ``split`` and ``p`` were unresolved) and several
    attribute names were mangled (``fpaa`` -> ``fp16``, ``labelaid``/``idalabel``
    -> ``label2id``/``id2label``). Follows the upstream
    run_image_classification example.
    """
    # Parse CLI (or a single .json file) into the three argument dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification" , model_args , data_args )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
    logger.info(f'Training/evaluation parameters {training_args}' )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir , "**" )
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir , "**" )
        dataset = load_dataset(
            "imagefolder" , data_files=data_files , cache_dir=model_args.cache_dir , task="image-classification" , )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split )
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy" )

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
    _train_transforms = Compose(
        [
            RandomResizedCrop(size ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    _val_transforms = Compose(
        [
            Resize(size ),
            CenterCrop(size ),
            ToTensor(),
            normalize,
        ] )

    def train_transforms(example_batch ):
        """Apply _train_transforms across a batch (stores the result in place)."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch ):
        """Apply _val_transforms across a batch (stores the result in place)."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )

    # Initalize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )


if __name__ == "__main__":
    main()
| 459 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): the three module constants below all share the obfuscated name
# ``__lowerCamelCase``, so each assignment overwrites the previous one. They were
# originally distinct (``logger`` / ``PREFIX`` / ``MODEL_MAPPING``) and the
# conversion functions below read those original names -- they need restoring.
__lowerCamelCase : List[str] = logging.get_logger(__name__)
# Base URL hosting the public OpenAI Jukebox checkpoints.
__lowerCamelCase : List[Any] = "https://openaipublic.azureedge.net/jukebox/models/"
# Checkpoint files (relative to the base URL) required for each supported model.
__lowerCamelCase : Tuple = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key( key ):
    """Translate one OpenAI Jukebox state-dict key to the transformers naming scheme.

    Restores the parameter name ``key`` (the obfuscated definition took
    ``lowerCAmelCase_`` while the body referenced ``key`` -- a NameError) and the
    function name ``replace_key`` that ``fix_jukebox_keys`` calls.

    Args:
        key: original checkpoint key (possibly already partially renamed by the
            regex passes in ``fix_jukebox_keys``).

    Returns:
        The renamed key (unchanged if no rule applies).
    """
    # Conv1d layers inside resnet blocks: .model.1/.model.3 -> conv1d_1/conv1d_2.
    if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.bias" , ".conv1d_1.bias" )
    elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.weight" , ".conv1d_1.weight" )
    elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.bias" , ".conv1d_2.bias" )
    elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.weight" , ".conv1d_2.weight" )

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
    if "prime_prior" in key:
        key = key.replace("prime_prior" , "encoder" )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb." , "." )
    if key.endswith("k" ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k" , ".codebook" )
    if "y_emb." in key:
        return key.replace("y_emb." , "metadata_embedding." )
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb" , "embed_tokens" )
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
    if ".ln" in key:
        return key.replace(".ln" , ".layer_norm" )
    if "_ln" in key:
        return key.replace("_ln" , "_layer_norm" )
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj" , "encoder.proj_in" )
    if "prime_x_out" in key:
        return key.replace("prime_x_out" , "encoder.lm_head" )
    if "prior.x_out" in key:
        return key.replace("x_out" , "fc_proj_out" )
    if "x_emb" in key:
        return key.replace("x_emb" , "embed_tokens" )
    return key
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ):
    """Rename every key of ``state_dict`` to the transformers layout.

    The obfuscated signature reused one parameter name four times (a
    SyntaxError) and every assignment target was renamed ``lowercase``, so the
    function returned an empty dict. Names are restored from the upstream
    Jukebox conversion script; the function name matches its call site in
    ``convert_openai_checkpoint``.

    Args:
        state_dict: original (OpenAI) weights for one sub-model.
        model_state_dict: state dict of the target JukeboxModel, used to check
            that each renamed key exists and has a matching shape.
        key_prefix: location of this sub-model inside the full model
            (``"vqvae"`` or ``"priors.N"``).
        mapping: dict updated in place with ``new_key -> original_key`` entries.

    Returns:
        dict of renamed weights.
    """
    new_dict = {}
    import re

    # Encoder / decoder / conditioner conv, resnet and projection layer patterns.
    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )

        if f'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(f'failed converting {original_key} to {key}, does not match' )
        # handle missmatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            val = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
    """Download the OpenAI Jukebox checkpoints and convert them to transformers.

    Reconstructed from the obfuscated source, in which every assignment target
    was renamed ``lowercase`` (so ``config``, ``weight_dict``, ``mapping`` etc.
    were never populated) and the function name no longer matched the
    ``convert_openai_checkpoint(...)`` call in the ``__main__`` block.

    NOTE(review): this relies on the module constants ``PREFIX`` and
    ``MODEL_MAPPING`` (and a module ``logger``); the obfuscated file collapsed
    their names and they must be restored at module level as well.

    Args:
        model_name: hub-style model id, e.g. ``"jukebox-5b-lyrics"``.
        pytorch_dump_folder_path: output directory for the converted model.

    Returns:
        The list of converted prior state dicts (useful for debugging).
    """
    # Download any missing original checkpoint shards.
    for file in MODEL_MAPPING[model_name.split("/" )[-1]]:
        if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
            r = requests.get(f'{PREFIX}{file}' , allow_redirects=True )
            os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=True )
            open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , "wb" ).write(r.content )

    model_to_convert = MODEL_MAPPING[model_name.split("/" )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["model"]

        # Expand the abbreviated parameter suffixes used by the original code.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b" ):
                new_dic[k.replace("b" , "bias" )] = old_dic[k]
            elif k.endswith(".w" ):
                new_dic[k.replace("w" , "weight" )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks." , ".model." )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # Shard 0 is the VQ-VAE; shards 1..3 are priors stored top-down.
        key_prefix = "vqvae" if i == 0 else f'priors.{3 - i}'
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )

    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(f'{pytorch_dump_folder_path}/mapping.json' , "w" ) as txtfile:
        json.dump(mapping , txtfile )

    print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )

    return weight_dict
if __name__ == "__main__":
    # NOTE(review): the obfuscation renamed the parser and parsed-args variables
    # to ``__lowerCamelCase``, so the later references to ``parser`` and ``args``
    # are unresolved; the original names (``parser`` / ``args``) must be restored.
    __lowerCamelCase : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    __lowerCamelCase : List[str] = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 459 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import plumbing for the XLM-RoBERTa-XL sub-package: configuration symbols
# are always importable; modelling symbols only when torch is installed.
#
# NOTE(review): the obfuscation collapsed the variable names. The dict below was
# originally ``_import_structure`` (referenced by name at the bottom), the list
# in the torch branch was assigned into
# ``_import_structure["modeling_xlm_roberta_xl"]``, and the final assignment was
# ``sys.modules[__name__] = _LazyModule(...)``. Those names must be restored for
# the lazy module to work.
_lowercase : Any = {
    'configuration_xlm_roberta_xl': [
        'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XLMRobertaXLConfig',
        'XLMRobertaXLOnnxConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: skip registering the model classes.
    pass
else:
    _lowercase : Union[str, Any] = [
        'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLMRobertaXLForCausalLM',
        'XLMRobertaXLForMaskedLM',
        'XLMRobertaXLForMultipleChoice',
        'XLMRobertaXLForQuestionAnswering',
        'XLMRobertaXLForSequenceClassification',
        'XLMRobertaXLForTokenClassification',
        'XLMRobertaXLModel',
        'XLMRobertaXLPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    # At runtime the module is replaced by a lazy proxy that imports on access.
    _lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 49 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
SCREAMING_SNAKE_CASE_ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
SCREAMING_SNAKE_CASE_ = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
SCREAMING_SNAKE_CASE_ = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
    """Mean-squared-error metric backed by ``sklearn.metrics.mean_squared_error``.

    Fixes applied to the obfuscated source: the three methods all shared one
    name (so only the last survived and ``self._get_feature_types()`` was
    unresolved), and ``_compute`` reused one parameter name five times (a
    SyntaxError). Method and parameter names are restored to the canonical
    ``datasets.Metric`` interface.

    NOTE(review): the module constants ``_DESCRIPTION`` / ``_CITATION`` /
    ``_KWARGS_DESCRIPTION`` were also collapsed to one name upstream and must be
    restored at module level.
    """

    def _info( self ):
        """Describe the metric (features depend on the selected config)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
            ] , )

    def _get_feature_types( self ):
        """Return the feature schema: the "multilist" config accepts 2-D inputs."""
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('float' ) ),
                "references": datasets.Sequence(datasets.Value('float' ) ),
            }
        else:
            return {
                "predictions": datasets.Value('float' ),
                "references": datasets.Value('float' ),
            }

    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """Compute MSE (or RMSE when ``squared=False``).

        Returns:
            dict with a single ``"mse"`` entry.
        """
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 517 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
# NOTE(review): the obfuscation renamed all five module constants below to
# ``lowercase``, so each assignment overwrites the previous one. The tokenizer
# class further down reads the original names (``logger``, ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP``, ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``,
# ``PRETRAINED_INIT_CONFIGURATION``) -- those names must be restored.
lowercase : int = logging.get_logger(__name__)
# Expected vocabulary/tokenizer file names inside a checkpoint directory.
lowercase : Union[str, Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs of the vocab/tokenizer files for the published checkpoints.
lowercase : Union[str, Any] = {
    """vocab_file""": {
        """squeezebert/squeezebert-uncased""": (
            """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
        ),
        """squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
        """squeezebert/squeezebert-mnli-headless""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """squeezebert/squeezebert-uncased""": (
            """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
        ),
        """squeezebert/squeezebert-mnli""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
        ),
        """squeezebert/squeezebert-mnli-headless""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum sequence length per checkpoint.
lowercase : int = {
    """squeezebert/squeezebert-uncased""": 5_1_2,
    """squeezebert/squeezebert-mnli""": 5_1_2,
    """squeezebert/squeezebert-mnli-headless""": 5_1_2,
}
# Per-checkpoint init overrides.
lowercase : Union[str, Any] = {
    """squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
    """squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
    """squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class a__ ( PreTrainedTokenizerFast ):
    """Fast SqueezeBERT tokenizer, backed by HuggingFace *tokenizers*.

    Behaviour is identical to BertTokenizerFast (WordPiece vocabulary with the
    ``[CLS] A [SEP] B [SEP]`` special-token layout).

    Fixes applied to the obfuscated source: the base class was the undefined
    name ``__SCREAMING_SNAKE_CASE`` (restored to ``PreTrainedTokenizerFast``,
    imported at the top of the file); the five class attributes all shared the
    name ``_A``; every method reused one parameter name (a SyntaxError); and
    the method names were mangled. All are restored to the canonical tokenizer
    interface.
    """

    # Canonical class attributes introspected by the PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the requested ones.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` (plus ``B [SEP]`` when a pair is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Return 0s for the first segment (incl. specials) and 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the backend vocabulary files and return the written paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
def UpperCAmelCase_ ( mass , velocity ):
    """Return the kinetic energy ``0.5 * m * v**2`` of a body.

    The obfuscated source named both parameters ``_UpperCAmelCase`` (a
    SyntaxError); they are restored to ``mass`` and ``velocity``. The leading
    ``| 708 |`` table-separator junk fused onto the ``def`` line is dropped.

    Args:
        mass: mass of the body; must be non-negative.
        velocity: velocity of the body; its sign is ignored.

    Raises:
        ValueError: if ``mass`` is negative.
    """
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""" )
    return 0.5 * mass * abs(velocity ) * abs(velocity )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 584 | 0 |
'''simple docstring'''
class _snake_case : # Public class to implement a graph
def __init__( self , a__ , a__ , a__ ) -> None:
'''simple docstring'''
snake_case_ = row
snake_case_ = col
snake_case_ = graph
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> bool:
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> None:
'''simple docstring'''
snake_case_ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
snake_case_ = [-1, 0, 1, -1, 1, -1, 0, 1]
snake_case_ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , a__ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , a__ )
def lowerCAmelCase__ ( self ) -> int: # And finally, count all islands.
'''simple docstring'''
snake_case_ = [[False for j in range(self.COL )] for i in range(self.ROW )]
snake_case_ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(a__ , a__ , a__ )
count += 1
return count
| 400 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the obfuscation renamed all five module constants below to
# ``_SCREAMING_SNAKE_CASE``, so each assignment overwrites the previous one.
# The tokenizer class further down reads the original names (``logger``,
# ``SPIECE_UNDERLINE``, ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``,
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``) -- those names must be restored.
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
# SentencePiece word-boundary marker.
_SCREAMING_SNAKE_CASE : Dict = "▁"
# Expected vocabulary file name inside a checkpoint directory.
_SCREAMING_SNAKE_CASE : Tuple = {"vocab_file": "sentencepiece.bpe.model"}
# Download URLs of the SentencePiece model for the published checkpoints.
_SCREAMING_SNAKE_CASE : int = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}
# Maximum sequence length per checkpoint.
_SCREAMING_SNAKE_CASE : Optional[Any] = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}
class _snake_case ( lowercase_ ):
    """Slow (SentencePiece-based) XLM-RoBERTa tokenizer.

    The obfuscated source assigned all four class attributes to the single
    name ``lowerCAmelCase_``; the canonical attribute names that the
    ``PreTrainedTokenizer`` base class introspects are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__ = None , **a__ , ) -> None:
'''simple docstring'''
snake_case_ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a__ ) )
snake_case_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ = 1
snake_case_ = len(self.sp_model ) + self.fairseq_offset
snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Any:
'''simple docstring'''
snake_case_ = self.__dict__.copy()
snake_case_ = None
snake_case_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , a__ ) -> Any:
'''simple docstring'''
snake_case_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self , a__ , a__ = None , a__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> List[int]:
'''simple docstring'''
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , a__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(a__ , out_type=a__ )
def lowerCAmelCase__ ( self , a__ ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ = self.sp_model.PieceToId(a__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase__ ( self , a__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ = "".join(a__ ).replace(a__ , " " ).strip()
return out_string
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
| 400 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

# FIX: every constant below was previously bound to the single shadowing name
# `UpperCAmelCase_`, leaving the class attributes referencing undefined names.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" tokenizer for DPR context encoders (backed by HuggingFace's *tokenizers* library).

    Identical to `BertTokenizerFast` and runs end-to-end tokenization: punctuation splitting and wordpiece.
    """

    # FIX: attributes were all bound to one shadowing name; restore the
    # tokenizer-contract attribute names.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" tokenizer for DPR question encoders (backed by HuggingFace's *tokenizers* library).

    Identical to `BertTokenizerFast` and runs end-to-end tokenization: punctuation splitting and wordpiece.
    """

    # FIX: attributes were all bound to one shadowing name; restore the
    # tokenizer-contract attribute names.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
# Output containers for the reader's span decoding.
# FIX: all three constants were previously bound to one shadowing name.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Mixin that adds DPR-reader-specific encoding and span decoding on top of a tokenizer.

    Fixes vs. previous revision: the score-sorting lambda referenced an
    undefined name `x`, several results were appended to lists that were never
    bound (the assignments had been flattened to throwaway locals), and both
    decoding methods shared one name so `self._get_best_spans` was unresolvable.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        # No passages at all: behave like a plain tokenizer call.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts given: encode it as a text pair.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question is duplicated for every passage.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to everything that is not padding.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ):
        """Decode the reader logits into up to `num_spans` best answer spans.

        Passages are visited in decreasing relevance order; up to
        `num_spans_per_passage` non-overlapping spans are kept per passage.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                # Shift span indices back into the full-sequence coordinate frame.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        """Return up to `top_spans` highest-scoring, non-overlapping (start, end) spans."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Discard spans that overlap an already-chosen (higher-scoring) span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Constructs a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).

    `DPRReaderTokenizerFast` is almost identical to `BertTokenizerFast` and runs end-to-end tokenization, with the
    extra `__call__`/`decode_best_spans` behaviour provided by `CustomDPRReaderTokenizerMixin`.
    """

    # FIX: attributes were all bound to one shadowing name; restore the
    # tokenizer-contract attribute names.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 367 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original LUKE checkpoint into a Transformers `LukeModel` and save it.

    Args:
        checkpoint_path: path to the original ``pytorch_model.bin``.
        metadata_path: path to the ``metadata.json`` describing the model config.
        entity_vocab_path: path to the TSV entity vocabulary.
        pytorch_dump_folder_path: output directory for model and tokenizer.
        model_size: ``"base"`` or ``"large"`` — selects the expected output checks.

    Raises:
        ValueError: if the converted state dict has unexpected keys or the
            model's outputs do not match the reference values.
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from "@" and "#".
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # (word-to-entity, entity-to-word and entity-to-entity queries all start
    # from the pretrained word-to-word query).
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # BUG FIX: this comparison previously used `!=`, raising exactly when the
    # shape WAS correct and silently passing when it was wrong.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    """Read a tab-separated entity vocabulary file.

    Each line has the form ``<entity title><TAB><count>``; the returned dict
    maps each entity title to its 0-based line index.

    Fixes vs. previous revision: the dict entry assignment had been flattened
    to a throwaway local, so an empty dict was always returned, and the
    function shared its name with the checkpoint converter.
    """
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
UpperCAmelCase_ : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 367 | 1 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    """A dense matrix of ints/floats supporting basic linear algebra.

    Provides determinant, minors/cofactors, adjugate, inverse, row/column
    insertion, equality, negation, addition, subtraction, scalar and matrix
    multiplication, and integer powers.

    Note: scalar multiplication truncates each product with ``int()``, so
    ``inverse()`` returns integer-truncated entries (behaviour kept for
    backward compatibility).

    Fixes vs. previous revision: ``self.rows`` was never assigned (the
    assignments had been flattened to throwaway locals), the class body
    referenced ``Matrix`` while the class itself carried a different name,
    and every method shared one name so only the last definition survived.
    """

    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self):
        """Return the matrix columns as a list of lists."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        """(rows, columns) pair."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Return the identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        """Return the determinant (0 for non-square matrices)."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self):
        # (sic) original spelling kept for backward compatibility.
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the submatrix obtained by deleting `row` and `column`."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row, position=None):
        """Append `row`, or insert it at `position` when given."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        """Append `column`, or insert it at `position` when given."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            # NOTE: int() truncation preserved from the original implementation.
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))


# Backward-compatible alias for the previous class name.
__UpperCamelCase = Matrix
if __name__ == "__main__":
import doctest
doctest.testmod() | 334 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the Autoformer model.
# FIX: `_import_structure` was never defined (both the dict and the torch-only
# list were bound to a shadowing throwaway name), and the `_LazyModule` proxy
# was discarded instead of being installed into `sys.modules`.
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling classes as well.
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 314 | 0 |
'''simple docstring'''
lowercase = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Breadth-first search in the residual network.

    Fills ``parent`` with the BFS tree (parent[v] = predecessor of v) and
    returns True iff ``t`` is reachable from ``s`` through edges with
    remaining capacity > 0.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the edges crossing a minimum s-t cut of ``graph``.

    Runs Ford-Fulkerson with BFS augmenting paths (Edmonds-Karp), mutating
    ``graph`` into the residual network, then reports every edge that had
    positive capacity but is saturated (residual 0) afterwards.
    For ``test_graph`` above this yields [(1, 3), (4, 3), (4, 5)].
    """
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the augmenting path found by BFS.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities (forward minus flow, backward plus flow).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Saturated edges (residual 0, original capacity > 0) form the min cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
    # Expected output for the example network: [(1, 3), (4, 3), (4, 5)]
    print(mincut(test_graph, source=0, sink=5))
| 564 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

# File name the SentencePiece model is saved under / loaded from.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    """
    Construct a T5 tokenizer based on SentencePiece.

    Args:
        vocab_file: Path to the SentencePiece model file (``spiece.model``).
        eos_token: End-of-sequence token. Defaults to ``"</s>"``.
        unk_token: Unknown token. Defaults to ``"<unk>"``.
        pad_token: Padding token. Defaults to ``"<pad>"``.
        extra_ids: Number of ``<extra_id_*>`` sentinel tokens appended at the
            end of the vocabulary.
        additional_special_tokens: Extra special tokens; when ``extra_ids > 0``
            it must contain exactly that many ``extra_id`` tokens.
        sp_model_kwargs: Keyword arguments forwarded to ``SentencePieceProcessor``.
        legacy: Whether to keep the pre-#24565 behaviour around tokens that
            follow special tokens.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        # TODO(PVP): remove in Transformers v5 — keeps the historical 512 default
        # for the canonical checkpoints while warning the user about it.
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length

    @property
    def vocab_size(self):
        # SentencePiece pieces plus the appended <extra_id_*> sentinels.
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions (the appended EOS) and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        """Return the ``<extra_id_*>`` sentinel tokens present in the special tokens."""
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """T5 does not use token type ids; return a list of zeros of the full length."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Append EOS to each sequence: ``X </s>`` or ``A </s> B </s>``."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        # The wrapped C++ SentencePiece object is not picklable; rebuild on unpickle.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        # Strip the artificial leading underline re-added by SentencePiece on
        # non-initial fragments (non-legacy behaviour only).
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab; sentinels map to the top ids."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the SentencePiece model into ``save_directory``; return the path tuple."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file to copy (e.g. loaded from a serialized proto): dump the proto.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 564 | 1 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
lowercase_ : Optional[int] = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_28, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
lowercase_ : Any = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
lowercase_ : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55)
lowercase_ : Optional[int] = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
lowercase_ : Union[str, Any] = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
lowercase_ : Dict = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
lowercase_ : List[Any] = tf.keras.preprocessing.image.img_to_array(test_image)
lowercase_ : Union[str, Any] = np.expand_dims(test_image, axis=0)
lowercase_ : List[str] = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
lowercase_ : Optional[int] = '''Normal'''
if result[0][0] == 1:
lowercase_ : int = '''Abnormality detected'''
| 572 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        """Round-trip a config through save_pretrained/from_pretrained."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        """from_model_config should pull generate-relevant fields from a model config."""
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        """Ad-hoc attributes survive save/load but are dropped by from_model_config."""
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        """Keyword arguments at init and at from_pretrained override defaults."""
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Authenticate against the staging hub for the duration of the class.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of repos the tests may have created.
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 572 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """Builds small RobertaConfig/input fixtures for the Flax model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    # All Flax Roberta heads under test (empty when flax is unavailable).
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading every head from the PyTorch roberta-base checkpoint."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 20 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration for wrapping a timm model as a Transformers backbone.

    Args:
        backbone: Name of the timm model to load (e.g. ``"resnet50"``).
        num_channels: Number of input image channels.
        features_only: Whether to return only feature maps from the backbone.
        use_pretrained_backbone: Whether to load timm's pretrained weights.
        out_indices: Indices of the feature maps to return; defaults to the last one.
    """

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # Marker consumed by the backbone utilities to route through timm.
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 20 | 1 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Convert a fairseq roberta-prelayernorm checkpoint from the hub to a Transformers model.

    Args:
        checkpoint_repo: Hub repo id of the original checkpoint.
        pytorch_dump_folder_path: Directory the converted model/tokenizer are saved to.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    # Build the model from the converted weights alone (no checkpoint path).
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 7 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    """Builds small EsmConfig/input fixtures and shape checks for the TF ESM tests."""

    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __a ( __a , __a , unittest.TestCase ):
    """Common model tests for the TF ESM family.

    NOTE(review): this block appears machine-mangled. The base classes are the
    class's own (not-yet-defined) name — upstream these are the TF model-tester
    and pipeline-tester mixins. Locals are all bound to ``__lowercase`` and the
    free name ``_lowerCamelCase`` referenced inside the methods is unresolved.
    Flagged for reconstruction; comments below describe the visible intent only.
    """

    # All model classes exercised by the common tests (empty when TF is absent).
    _lowerCamelCase : List[str] = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline tester.
    _lowerCamelCase : Optional[int] = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _lowerCamelCase : int = False
    _lowerCamelCase : Optional[int] = False

    def SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """Create the shared model tester and config tester."""
        # NOTE(review): upstream stores these as self.model_tester / self.config_tester;
        # here both land on the same clobbered local and are lost.
        __lowercase = TFEsmModelTester(self )
        __lowercase = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )

    def SCREAMING_SNAKE_CASE ( self ) -> str:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        """Smoke-test the base model forward pass."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCamelCase )

    def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        """Exercise the model in decoder mode with cross-attention inputs."""
        __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*_lowerCamelCase )

    def SCREAMING_SNAKE_CASE ( self ) -> Any:
        """Exercise the masked-LM head."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )

    def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        """Exercise the token-classification head."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )

    @slow
    def SCREAMING_SNAKE_CASE ( self ) -> Any:
        """Load the first pretrained checkpoint from the hub (slow/network test)."""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = TFEsmModel.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )

    @unittest.skip("Protein models do not support embedding resizing." )
    def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        pass

    @unittest.skip("Protein models do not support embedding resizing." )
    def SCREAMING_SNAKE_CASE ( self ) -> int:
        pass

    def SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """Check the input/output embedding and bias accessors of every model class."""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(_lowerCamelCase )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                __lowercase = model.get_bias()
                assert isinstance(_lowerCamelCase , _lowerCamelCase )
                for k, v in name.items():
                    assert isinstance(_lowerCamelCase , tf.Variable )
            else:
                __lowercase = model.get_output_embeddings()
                assert x is None
                __lowercase = model.get_bias()
                assert name is None
@require_tf
class __a ( unittest.TestCase ):
    """Integration tests comparing TF ESM outputs against recorded reference slices.

    NOTE(review): locals are mangled to ``__lowercase`` and the free name
    ``_lowerCamelCase`` used as a call argument below is unresolved — upstream
    these are the ``model``/``input_ids``/``output``/``expected_slice`` bindings.
    """

    @slow
    def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        """Masked-LM forward pass on esm2_t6_8M: check logits shape and a 3x3 slice."""
        __lowercase = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
        __lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
        __lowercase = model(_lowerCamelCase )[0]
        # Expected shape: batch 1, sequence 6, vocab 33.
        __lowercase = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , _lowerCamelCase )
        # compare the actual values for a slice.
        __lowercase = tf.constant(
            [
                [
                    [8.92_1518, -10.58_9814, -6.467_1307],
                    [-6.396_7156, -13.91_1377, -1.121_1915],
                    [-7.78_1247, -13.95_1557, -3.74_0592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        """Base-model forward pass on esm2_t6_8M: check a 3x3 hidden-state slice."""
        __lowercase = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
        __lowercase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        __lowercase = model(_lowerCamelCase )[0]
        # compare the actual values for a slice.
        __lowercase = tf.constant(
            [
                [
                    [0.1444_3092, 0.5412_5327, 0.324_7739],
                    [0.3034_0484, 0.0052_6676, 0.3107_7722],
                    [0.3227_8043, -0.2498_7096, 0.341_4628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 118 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; later code in this file calls ``logger.info`` so the
# binding must be named ``logger`` (the garbled version clobbered it into ``_a``).
logger = logging.get_logger(__name__)

# Decoder keys that are expected to be absent from the converted checkpoint;
# referenced as ``EXPECTED_MISSING_KEYS`` by the conversion routine below.
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def a_(name: str) -> str:
    """Map a fairseq MusicGen state-dict key onto its transformers equivalent.

    The original assigned every replacement to a dead local and returned the
    untouched input (and its body referenced ``name`` while the parameter was
    called something else); this restores the cumulative rename chain.
    """
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def a_(state_dict, hidden_size) -> Tuple[Dict, Dict]:
    """Rename fairseq keys in-place and split fused QKV projections.

    Returns the renamed decoder ``state_dict`` plus a separate dict holding the
    encoder-to-decoder projection weights. The garbled version had a duplicate
    parameter name (SyntaxError) and dropped the destination keys entirely.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        # NOTE(review): ``rename_keys`` is the sibling key-mapper defined above
        # (also mangled to ``a_`` in this file) — confirm the name on merge.
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            # Projection weights live on the composite model, not the decoder.
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def a_(checkpoint) -> "MusicgenDecoderConfig":
    """Build a MusicgenDecoderConfig for a named checkpoint size.

    Raises ValueError for anything other than 'small' / 'medium' / 'large'.
    The garbled version lost the ``hidden_size``/layer/head bindings entirely.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,  # FFN is 4x the model width
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def a_ ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ) -> Optional[Any]:
    """Convert a fairseq MusicGen checkpoint into a transformers
    MusicgenForConditionalGeneration, sanity-check one forward pass, and
    optionally save locally and/or push to the hub.

    NOTE(review): this block is machine-mangled — the signature repeats
    ``__magic_name__`` four times (a SyntaxError) and every local is bound to
    the single name ``snake_case``, so the names referenced below
    (``fairseq_model``, ``decoder_config``, ``decoder``, ``missing_keys``,
    ``unexpected_keys``, ``model``, ``audio_encoder``, ``processor``,
    ``input_ids``, ``logits``, ``pytorch_dump_folder``, ``repo_id``) are all
    unresolved. Restore distinct names from the upstream conversion script
    before running.
    """
    snake_case : Optional[int] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ )
    snake_case : str = decoder_config_from_checkpoint(__magic_name__ )
    snake_case : int = fairseq_model.lm.state_dict()
    snake_case : Any = rename_state_dict(
        __magic_name__ , hidden_size=decoder_config.hidden_size )
    snake_case : int = TaEncoderModel.from_pretrained('''t5-base''' )
    snake_case : List[str] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
    snake_case : Tuple = MusicgenForCausalLM(__magic_name__ ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    snake_case : Dict = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ )
    for key in missing_keys.copy():
        if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(__magic_name__ )
    if len(__magic_name__ ) > 0:
        raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
    if len(__magic_name__ ) > 0:
        raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
    # init the composite model
    snake_case : Any = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(__magic_name__ )
    # check we can do a forward pass
    snake_case : Any = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    snake_case : Dict = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        snake_case : Union[str, Any] = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits
    if logits.shape != (8, 1, 2_048):
        raise ValueError('''Incorrect shape for logits''' )
    # now construct the processor
    snake_case : List[Any] = AutoTokenizer.from_pretrained('''t5-base''' )
    snake_case : Optional[Any] = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
    snake_case : Any = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
    # set the appropriate bos/pad token ids
    snake_case : Union[str, Any] = 2_048
    snake_case : str = 2_048
    # set other default generation config params
    snake_case : int = int(30 * audio_encoder.config.frame_rate )
    snake_case : Optional[int] = True
    snake_case : Optional[Any] = 3.0
    if pytorch_dump_folder is not None:
        Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
        logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
        model.save_pretrained(__magic_name__ )
        processor.save_pretrained(__magic_name__ )
    if repo_id:
        logger.info(F"Pushing model {checkpoint} to {repo_id}" )
        model.push_to_hub(__magic_name__ )
        processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
    # CLI entry point for the MusicGen conversion script. The garbled version
    # bound the parser and parsed args to a clobbered name (`_a`) while the
    # following lines referenced `parser`/`args`, and dropped the parsed
    # `--device` option on the floor.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint',
        default='small',
        type=str,
        help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
    )
    parser.add_argument(
        '--pytorch_dump_folder',
        required=True,
        default=None,
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    parser.add_argument(
        '--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
    )
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 721 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a_(image) -> "torch.Tensor":
    """Convert a PIL image into a [-1, 1] float tensor shaped (1, C, H, W).

    Sides are rounded down to a multiple of 32 before resizing so downstream
    UNet down/up-sampling stays shape-consistent. The garbled version lost the
    ``w``/``h`` bindings and used a mangled dtype name (``np.floataa``).
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0  # [0, 1] -> [-1, 1]
class a_ ( a ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    NOTE(review): this block is machine-mangled — ``__init__`` and ``__call__``
    repeat one parameter name (a SyntaxError), and locals are bound to the
    single name ``snake_case``, leaving ``image``, ``batch_size``, ``height``,
    ``width``, ``latents``, ``accepts_eta``, ``eta`` etc. unresolved below.
    Comments describe the visible intent only.
    """

    def __init__( self : Optional[Any] , UpperCAmelCase__ : VQModel , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        """Register the VQ-VAE, UNet and scheduler submodules on the pipeline."""
        super().__init__()
        self.register_modules(vqvae=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )

    @torch.no_grad()
    def __call__( self : Any , UpperCAmelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : Optional[int] = 100 , UpperCAmelCase__ : Optional[float] = 0.0 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
        """Run the denoising loop on a low-resolution image and decode the result."""
        # Determine the batch size from the input type.
        if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
            snake_case : Optional[int] = 1
        elif isinstance(UpperCAmelCase__ , torch.Tensor ):
            snake_case : Any = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase__ )}" )
        if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
            snake_case : Optional[Any] = preprocess(UpperCAmelCase__ )
        snake_case , snake_case : Union[str, Any] = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        snake_case : List[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
        snake_case : str = next(self.unet.parameters() ).dtype
        snake_case : Dict = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )
        snake_case : Any = image.to(device=self.device , dtype=UpperCAmelCase__ )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(UpperCAmelCase__ , device=self.device )
        snake_case : Optional[Any] = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        snake_case : Union[str, Any] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        snake_case : Optional[Any] = {}
        if accepts_eta:
            snake_case : Dict = eta
        for t in self.progress_bar(UpperCAmelCase__ ):
            # concat latents and low resolution image in the channel dimension.
            snake_case : Optional[int] = torch.cat([latents, image] , dim=1 )
            snake_case : str = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
            # predict the noise residual
            snake_case : int = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            snake_case : Any = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
        # decode the image latents with the VQVAE
        snake_case : Optional[int] = self.vqvae.decode(UpperCAmelCase__ ).sample
        snake_case : int = torch.clamp(UpperCAmelCase__ , -1.0 , 1.0 )
        # [-1, 1] -> [0, 1], then NCHW -> NHWC numpy for output conversion.
        snake_case : Dict = image / 2 + 0.5
        snake_case : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            snake_case : Any = self.numpy_to_pil(UpperCAmelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A__ ):
    """Unit tests for DDPMScheduler, built on the shared SchedulerCommonTest base.

    NOTE(review): this block is machine-mangled — locals are all bound to
    ``SCREAMING_SNAKE_CASE__`` and the free name ``_a`` passed around below is
    unresolved (upstream it is the kwargs/config/scheduler/timestep being
    exercised). Comments describe the visible intent only.
    """

    # Scheduler classes exercised by the common test harness.
    a = (DDPMScheduler,)

    def lowercase_ ( self : Any , **__lowerCamelCase : Optional[Any] ) -> Optional[Any]:
        """Return a baseline scheduler config, updated with any overrides."""
        SCREAMING_SNAKE_CASE__ = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**_a )
        return config

    def lowercase_ ( self : List[Any] ) -> Tuple:
        """Sweep num_train_timesteps values."""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_a )

    def lowercase_ ( self : List[str] ) -> Dict:
        """Sweep matching (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=_a , beta_end=_a )

    def lowercase_ ( self : List[str] ) -> Optional[Any]:
        """Sweep beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=_a )

    def lowercase_ ( self : int ) -> Optional[int]:
        """Sweep variance types."""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=_a )

    def lowercase_ ( self : Union[str, Any] ) -> Any:
        """Toggle sample clipping."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=_a )

    def lowercase_ ( self : Any ) -> List[Any]:
        """Sweep thresholding options against each prediction type."""
        self.check_over_configs(thresholding=_a )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=_a , prediction_type=_a , sample_max_value=_a , )

    def lowercase_ ( self : str ) -> int:
        """Sweep prediction types."""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=_a )

    def lowercase_ ( self : List[str] ) -> Any:
        """Sweep representative timesteps through the forward check."""
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=_a )

    def lowercase_ ( self : Union[str, Any] ) -> Any:
        """Check _get_variance at the start, middle and end of the schedule."""
        SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE__ = scheduler_class(**_a )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5

    def lowercase_ ( self : List[Any] ) -> Tuple:
        """Full reverse-diffusion loop; compare sum/mean against recorded values."""
        SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE__ = scheduler_class(**_a )
        SCREAMING_SNAKE_CASE__ = len(_a )
        SCREAMING_SNAKE_CASE__ = self.dummy_model()
        SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
        SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
        for t in reversed(range(_a ) ):
            # 1. predict noise residual
            SCREAMING_SNAKE_CASE__ = model(_a , _a )
            # 2. predict previous mean of sample x_t-1
            SCREAMING_SNAKE_CASE__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            SCREAMING_SNAKE_CASE__ = pred_prev_sample
        SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(_a ) )
        SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(_a ) )
        assert abs(result_sum.item() - 258.9606 ) < 1e-2
        assert abs(result_mean.item() - 0.3372 ) < 1e-3

    def lowercase_ ( self : List[Any] ) -> Optional[Any]:
        """Same loop with v-prediction; compare against its recorded values."""
        SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(prediction_type='''v_prediction''' )
        SCREAMING_SNAKE_CASE__ = scheduler_class(**_a )
        SCREAMING_SNAKE_CASE__ = len(_a )
        SCREAMING_SNAKE_CASE__ = self.dummy_model()
        SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
        SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
        for t in reversed(range(_a ) ):
            # 1. predict noise residual
            SCREAMING_SNAKE_CASE__ = model(_a , _a )
            # 2. predict previous mean of sample x_t-1
            SCREAMING_SNAKE_CASE__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            SCREAMING_SNAKE_CASE__ = pred_prev_sample
        SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(_a ) )
        SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(_a ) )
        assert abs(result_sum.item() - 202.0296 ) < 1e-2
        assert abs(result_mean.item() - 0.2631 ) < 1e-3

    def lowercase_ ( self : Union[str, Any] ) -> Optional[int]:
        """Custom timesteps round-trip through previous_timestep."""
        SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE__ = scheduler_class(**_a )
        SCREAMING_SNAKE_CASE__ = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=_a )
        SCREAMING_SNAKE_CASE__ = scheduler.timesteps
        for i, timestep in enumerate(_a ):
            if i == len(_a ) - 1:
                SCREAMING_SNAKE_CASE__ = -1
            else:
                SCREAMING_SNAKE_CASE__ = timesteps[i + 1]
            SCREAMING_SNAKE_CASE__ = scheduler.previous_timestep(_a )
            SCREAMING_SNAKE_CASE__ = prev_t.item()
            self.assertEqual(_a , _a )

    def lowercase_ ( self : Dict ) -> str:
        """Non-descending custom timesteps must be rejected."""
        SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE__ = scheduler_class(**_a )
        SCREAMING_SNAKE_CASE__ = [100, 87, 50, 51, 0]
        with self.assertRaises(_a , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=_a )

    def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
        """Passing both num_inference_steps and custom timesteps must be rejected."""
        SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE__ = scheduler_class(**_a )
        SCREAMING_SNAKE_CASE__ = [100, 87, 50, 1, 0]
        SCREAMING_SNAKE_CASE__ = len(_a )
        with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )

    def lowercase_ ( self : Optional[Any] ) -> Optional[int]:
        """Custom timesteps beyond the training range must be rejected."""
        SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE__ = scheduler_class(**_a )
        SCREAMING_SNAKE_CASE__ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            _a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=_a )
| 493 |
"""Lazy import structure for the BioGPT model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Always-available submodules; the garbled version bound this and the modeling
# list to two throwaway names, so the final _LazyModule call below referenced
# an undefined `_import_structure` and the modeling names were never merged in.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only modeling classes.
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; the garbled version discarded it.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 405 | 0 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def __UpperCAmelCase(_outputs):
    """Element-wise logistic sigmoid of the raw model logits.

    The garbled version took a parameter named ``A`` while the body referenced
    an undefined ``_outputs``; the parameter is renamed to match the body.
    """
    return 1.0 / (1.0 + np.exp(-_outputs))
def __UpperCAmelCase(_outputs):
    """Numerically stable softmax over the last axis of the logits.

    Subtracting the row-max before exponentiating avoids overflow. The garbled
    version referenced an undefined ``_outputs`` and passed the input array as
    ``keepdims`` (should be ``True`` so broadcasting lines up).
    """
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class snake_case__ ( UpperCamelCase):
    """Post-processing functions applicable to classification model outputs.

    The garbled version named all three members ``a_`` so only the last one
    survived; the names are restored to match the ``.SIGMOID`` / ``.SOFTMAX`` /
    ``.NONE`` attribute accesses made by the pipeline below.
    """

    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    UpperCamelCase , r"\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n              has several labels, will apply the softmax function on the output.\n            - `\"sigmoid\"`: Applies the sigmoid function on the output.\n            - `\"softmax\"`: Applies the softmax function on the output.\n            - `\"none\"`: Does not apply any function on the output.\n    " , )
class snake_case__ ( UpperCamelCase):
    """Text-classification (sequence-classification) pipeline.

    NOTE(review): this block is machine-mangled — several signatures repeat the
    parameter name ``_A`` (a SyntaxError) and locals are clobbered into
    ``UpperCAmelCase_``, so names like ``tokenizer_kwargs``, ``top_k``,
    ``result``, ``args``, ``kwargs``, ``outputs``, ``scores`` referenced below
    are unresolved. Comments describe the visible intent only.
    """

    # Legacy mode returns a single dict instead of a list for single inputs.
    a_ = False
    # Default post-processing function when none can be inferred from the config.
    a_ = ClassificationFunction.NONE

    def __init__( self : Dict , **_A : Optional[Any] ) -> int:
        """Initialise the base pipeline and restrict to sequence-classification models."""
        super().__init__(**_A )
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )

    def A ( self : Dict , _A : Union[str, Any]=None , _A : Dict=None , _A : List[Any]="" , **_A : Tuple ) -> str:
        """Split user kwargs into preprocess / forward / postprocess parameter dicts."""
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        UpperCAmelCase_ : Optional[Any] = tokenizer_kwargs
        UpperCAmelCase_ : Dict = {}
        if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
            UpperCAmelCase_ : List[Any] = self.model.config.return_all_scores
        if isinstance(_A , _A ) or top_k is None:
            UpperCAmelCase_ : List[Any] = top_k
            UpperCAmelCase_ : Tuple = False
        elif return_all_scores is not None:
            # `return_all_scores` is a deprecated alias for `top_k`.
            warnings.warn(
                '''`return_all_scores` is now deprecated,  if want a similar functionality use `top_k=None` instead of'''
                ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , _A , )
            if return_all_scores:
                UpperCAmelCase_ : Optional[Any] = None
            else:
                UpperCAmelCase_ : Tuple = 1
        if isinstance(_A , _A ):
            UpperCAmelCase_ : int = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            UpperCAmelCase_ : Union[str, Any] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__( self : Optional[int] , *_A : List[Any] , **_A : Optional[int] ) -> Union[str, Any]:
        """Classify the given text(s); legacy mode wraps a single result in a list."""
        UpperCAmelCase_ : str = super().__call__(*_A , **_A )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        UpperCAmelCase_ : Dict = '''top_k''' not in kwargs
        if isinstance(args[0] , _A ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def A ( self : Tuple , _A : List[Any] , **_A : List[Any] ) -> Dict[str, GenericTensor]:
        """Tokenize a string, a text pair (dict), or the legacy list-of-pair form."""
        UpperCAmelCase_ : Tuple = self.framework
        if isinstance(_A , _A ):
            return self.tokenizer(**_A , return_tensors=_A , **_A )
        elif isinstance(_A , _A ) and len(_A ) == 1 and isinstance(inputs[0] , _A ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_A , **_A )
        elif isinstance(_A , _A ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
                ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
        return self.tokenizer(_A , return_tensors=_A , **_A )

    def A ( self : Dict , _A : Dict ) -> Dict:
        """Run the model forward pass on tokenized inputs."""
        return self.model(**_A )

    def A ( self : Dict , _A : List[str] , _A : Any=None , _A : List[Any]=1 , _A : Any=True ) -> str:
        """Convert logits into labelled scores, applying sigmoid/softmax as configured."""
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                UpperCAmelCase_ : List[Any] = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                UpperCAmelCase_ : int = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
                UpperCAmelCase_ : List[str] = self.model.config.function_to_apply
            else:
                UpperCAmelCase_ : Tuple = ClassificationFunction.NONE
        UpperCAmelCase_ : str = model_outputs['''logits'''][0]
        UpperCAmelCase_ : int = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            UpperCAmelCase_ : List[str] = sigmoid(_A )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            UpperCAmelCase_ : Tuple = softmax(_A )
        elif function_to_apply == ClassificationFunction.NONE:
            UpperCAmelCase_ : Union[str, Any] = outputs
        else:
            raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}" )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
        UpperCAmelCase_ : List[Any] = [
            {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(_A )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda _A : x["score"] , reverse=_A )
            if top_k is not None:
                UpperCAmelCase_ : Optional[int] = dict_scores[:top_k]
        return dict_scores
| 711 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __UpperCAmelCase(job):
    """Summarise one GitHub Actions job dict into start/end/duration (minutes).

    The garbled version took a parameter named ``A`` while referencing ``job``,
    and clobbered every local, so the returned dict stayed empty.
    """
    job_info = {}
    start = job['''started_at''']
    end = job['''completed_at''']
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info['''started_at'''] = start
    job_info['''completed_at'''] = end
    job_info['''duration'''] = duration_in_min
    return job_info
def __UpperCAmelCase(workflow_run_id, token=None):
    """Fetch per-job timing info for a workflow run from the GitHub API.

    Paginates at 100 jobs per page; returns {} on any failure (best-effort for
    CI reporting, so the broad except is deliberate). The garbled version took
    duplicate parameters named ``A`` and lost the ``headers``/``url`` bindings.
    """
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job['''name''']: extract_time_from_single_job(job) for job in result['''jobs''']})
        # First request already covered 100 jobs; fetch any remaining pages.
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"&page={i + 2}", headers=headers).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job) for job in result['''jobs''']})
        return job_time
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
if __name__ == "__main__":
    # CLI entry point: print each job of a workflow run sorted by duration.
    # The garbled version bound parser/args/job_time to a clobbered name while
    # the following lines referenced `parser`, `args` and `job_time`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    # Longest jobs first.
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'''{k}: {v["duration"]}''')
| 216 | 0 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _lowerCAmelCase ( a ):
    """Tests for ``FeaturesManager.determine_framework`` (ONNX export helper).

    NOTE(review): obfuscated identifiers survive here — every method is named
    ``snake_case`` (later defs shadow earlier ones) and locals rebind
    ``lowerCAmelCase__`` while later lines read names that are never bound
    (``model_pt``, ``model_tf``, ``__UpperCAmelCase`` in the no-argument
    methods). Code is left byte-identical; only documentation was added.
    """

    def snake_case ( self ):
        """setUp: remember the tiny test checkpoint and the framework labels."""
        lowerCAmelCase__ :Tuple = SMALL_MODEL_IDENTIFIER
        lowerCAmelCase__ :List[Any] = 'pt'
        lowerCAmelCase__ :Optional[Any] = 'tf'

    def snake_case ( self , __UpperCAmelCase ):
        """Save a PyTorch checkpoint of the test model into the given directory."""
        lowerCAmelCase__ :Optional[Any] = AutoModel.from_pretrained(self.test_model )
        # NOTE(review): `model_pt` is never bound above (obfuscation artifact).
        model_pt.save_pretrained(__UpperCAmelCase )

    def snake_case ( self , __UpperCAmelCase ):
        """Save a TensorFlow checkpoint (converted from PT) into the directory."""
        lowerCAmelCase__ :Dict = TFAutoModel.from_pretrained(self.test_model , from_pt=__UpperCAmelCase )
        # NOTE(review): `model_tf` is never bound above (obfuscation artifact).
        model_tf.save_pretrained(__UpperCAmelCase )

    def snake_case ( self ):
        """When the user supplies a framework, it must be returned verbatim."""
        lowerCAmelCase__ :Tuple = 'mock_framework'

        # Framework provided - return whatever the user provides
        lowerCAmelCase__ :Dict = FeaturesManager.determine_framework(self.test_model , __UpperCAmelCase )
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :int = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self ):
        """With no framework given, infer it from the local checkpoint type."""
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(__UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(__UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , self.framework_tf )

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(__UpperCAmelCase ):
                lowerCAmelCase__ :Union[str, Any] = FeaturesManager.determine_framework(__UpperCAmelCase )

    def snake_case ( self ):
        """With a hub model id, fall back on whichever framework is installed."""
        # TensorFlow not in environment -> use PyTorch
        lowerCAmelCase__ :Dict = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_tf_available' , __UpperCAmelCase ):
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )

        # PyTorch not in environment -> use TensorFlow
        lowerCAmelCase__ :List[Any] = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_torch_available' , __UpperCAmelCase ):
            lowerCAmelCase__ :Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_tf )

        # Both in environment -> use PyTorch
        lowerCAmelCase__ :Any = MagicMock(return_value=__UpperCAmelCase )
        lowerCAmelCase__ :int = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_tf_available' , __UpperCAmelCase ), patch(
            'transformers.onnx.features.is_torch_available' , __UpperCAmelCase ):
            lowerCAmelCase__ :str = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )

        # Both not in environment -> raise error
        lowerCAmelCase__ :Optional[Any] = MagicMock(return_value=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_tf_available' , __UpperCAmelCase ), patch(
            'transformers.onnx.features.is_torch_available' , __UpperCAmelCase ):
            with self.assertRaises(__UpperCAmelCase ):
                lowerCAmelCase__ :Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
| 93 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (divisors excluding n).

    Divisor pairs ``(i, n // i)`` are enumerated up to sqrt(n) with pure
    integer arithmetic, avoiding the float round-off and the int/float
    comparison of the previous ``sqrt``-based version.
    """
    total = 0
    i = 1
    while i * i <= n:
        if n % i == 0:
            total += i
            # Count the paired divisor unless i is the exact square root.
            if i != n // i:
                total += n // i
        i += 1
    # The loop counted n itself (via the pair 1 * n); proper divisors exclude it.
    return total - n


def solution(limit: int = 10_000) -> int:
    """Project Euler 21: sum of all amicable numbers below ``limit``.

    ``n`` is amicable when ``d(d(n)) == n`` and ``d(n) != n``, where ``d`` is
    the sum of proper divisors. The previous version defined both functions
    under the same name (the second shadowed the first) while calling
    ``sum_of_divisors``/``solution`` — an immediate NameError — and passed the
    limit instead of the candidate into the inner call; both are fixed here,
    and ``d`` is evaluated twice per candidate instead of three times.
    """
    total = 0
    for candidate in range(1, limit):
        partner = sum_of_divisors(candidate)
        if partner != candidate and sum_of_divisors(partner) == candidate:
            total += candidate
    return total
if __name__ == "__main__":
    # Read the limit from stdin and print the sum of amicable numbers below it.
    # input() already returns a str, so the old str(...) wrapper was redundant.
    print(solution(int(input().strip())))
| 563 | 0 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 703 | import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Shared type aliases for this module. The previous version bound all three
# values to the same placeholder name, leaving PICO_TO_ANGSTROM (used by the
# ProteinNet parser below) undefined.
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

# ProteinNet stores coordinates in picometres; PDB output uses angstroms.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation.

    Field names are restored to match the constructor calls and attribute
    accesses elsewhere in this module (``Protein(atom_positions=...)``,
    ``prot.atom_mask`` etc.); the previous version rebound a single
    placeholder name for every field and passed an undefined value to the
    ``frozen=`` argument, so the class could not even be imported.
    """

    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> "Protein":
    """Parse a ProteinNet-format string into a ``Protein``.

    NOTE(review): the previous version collapsed every local onto one
    placeholder name, so it raised NameError on first use. This reconstruction
    follows the AlphaFold/openfold parser; the exact slice layout of the
    [TERTIARY] section and the ``atom_order`` indexing should be confirmed
    against that reference.
    """
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    # Pair each [SECTION] tag with the lines of its body.
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]  # backbone atoms present in ProteinNet
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            # Map unknown residue symbols to 'X'. (Fixes the old FIXME: the
            # original tried to assign into an immutable str.)
            seq = "".join(c if c in residue_constants.restypes else "X" for c in seq)
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                # Coordinates are stored atom-major: every 3rd column belongs
                # to this backbone atom — presumably; verify against openfold.
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM  # picometres -> angstroms
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            # Zero out every atom of masked-off residues.
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot, chain_id: int = 0) -> List[str]:
    """Build the REMARK/PARENT header lines for one chain of a PDB file.

    The previous version declared two parameters with the same placeholder
    name (a SyntaxError); the names are restored to match the call sites in
    ``to_pdb`` below. Emits ``PARENT N/A`` when no parent applies to the chain.
    """
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents belonging to the requested chain.
        parents = [p for p, i in zip(parents, parents_chain_index) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot, pdb_str: str) -> str:
    """Add REMARK/PARENT headers to an existing PDB string.

    Per-chain PARENT lines are re-inserted after every TER record that is not
    followed by END. NOTE(review): reconstructed from the AlphaFold/openfold
    reference — the previous version could not parse (duplicate parameter
    names) and every local shared one placeholder identifier.
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by chain index (keys are stringified indices).
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        # Strip any pre-existing PARENT/REMARK lines; everything else is kept.
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            # A new chain begins after this TER: emit its PARENT header.
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot) -> str:
    """Convert a ``Protein`` instance to a PDB-format string.

    NOTE(review): reconstructed from the AlphaFold/openfold reference — the
    previous version collapsed every local onto one placeholder name, so the
    per-atom slice assignments and the output lines referenced unbound names.
    Column widths follow the wwPDB ATOM/TER record layout; confirm the exact
    spacing (and the ``restype_1to3`` attribute name) against that reference.
    Assumes ``prot.b_factors`` is not None (same as the original zip).
    """
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        # Map a residue index to its 3-letter PDB code ("UNK" for unknowns).
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot) -> np.ndarray:
    """Return the standard atom-presence mask implied by each residue's type.

    Looks up ``residue_constants.STANDARD_ATOM_MASK`` rather than the
    experimentally observed mask stored on the protein itself.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: Mapping[str, np.ndarray],
    result: Mapping[str, Any],
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> "Protein":
    """Assemble a ``Protein`` from model features and prediction results.

    ``residue_index`` is shifted by +1 because PDB residue numbering is
    1-based. Missing ``b_factors`` default to zeros shaped like the final
    atom mask. The previous version declared seven parameters with the same
    placeholder name (a SyntaxError); names are restored to match the
    ``Protein`` constructor.
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 356 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class snake_case_ ( snake_case_ , unittest.TestCase ):
    """Tokenization tests for the Chinese RoFormer tokenizers.

    NOTE(review): obfuscated identifiers survive here — the class inherits
    from its own (still-undefined) name, the four class attributes all rebind
    ``lowerCamelCase`` (originally the tokenizer classes plus two boolean
    flags), and several methods read names (``lowerCamelCase__``,
    ``input_text``/``output_text``, ``tokens``) that are never bound. Code is
    left byte-identical; only documentation was added.
    """

    lowerCamelCase = RoFormerTokenizer
    lowerCamelCase = RoFormerTokenizerFast
    lowerCamelCase = True
    lowerCamelCase = True

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
        """setUp: defer to the mixin's fixture setup."""
        super().setUp()

    def __SCREAMING_SNAKE_CASE ( self : int , **__magic_name__ : List[str] ) -> Tuple:
        """Load the slow tokenizer from the pretrained Chinese checkpoint."""
        # NOTE(review): `lowerCamelCase__` is unbound; presumably the kwargs
        # parameter was meant here.
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **lowerCamelCase__ )

    def __SCREAMING_SNAKE_CASE ( self : Dict , **__magic_name__ : Dict ) -> int:
        """Load the fast (Rust) tokenizer from the pretrained checkpoint."""
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **lowerCamelCase__ )

    def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
        """Return a (raw text, expected tokenization) pair for Chinese input."""
        lowerCamelCase_ : str = "永和服装饰品有限公司,今天天气非常好"
        lowerCamelCase_ : Optional[int] = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
        """Tokenize Chinese text with the slow tokenizer and check token ids."""
        lowerCamelCase_ : int = self.get_tokenizer()
        lowerCamelCase_ , lowerCamelCase_ : Tuple = self.get_chinese_input_output_texts()
        lowerCamelCase_ : Any = tokenizer.tokenize(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ , output_text.split() )
        lowerCamelCase_ : List[str] = tokens + [tokenizer.unk_token]
        lowerCamelCase_ : str = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )

    def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
        """Same tokenization check against the fast (Rust) tokenizer."""
        lowerCamelCase_ : Optional[Any] = self.get_rust_tokenizer()
        lowerCamelCase_ , lowerCamelCase_ : Optional[int] = self.get_chinese_input_output_texts()
        lowerCamelCase_ : int = tokenizer.tokenize(lowerCamelCase__ )
        self.assertListEqual(lowerCamelCase__ , output_text.split() )
        lowerCamelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token]
        lowerCamelCase_ : Dict = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )

    def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
        """Intentionally skipped by the mixin for this tokenizer."""
        pass

    def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
        """Intentionally skipped by the mixin for this tokenizer."""
        pass

    def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
        """Intentionally skipped by the mixin for this tokenizer."""
        pass
| 488 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case_ : List[str] = logging.get_logger(__name__)
class lowercase__(DPTImageProcessor):
    """Deprecated alias of ``DPTImageProcessor`` kept for backward compatibility.

    Instantiating it emits a ``FutureWarning``; it is slated for removal in
    Transformers v5. The previous version declared ``*args`` and ``**kwargs``
    under the same placeholder name (a SyntaxError), inherited from an
    undefined name instead of the ``DPTImageProcessor`` imported above, and
    passed an undefined warning category.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Warn on every instantiation, then defer entirely to the new class.
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 212 | 0 |
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge.

    Formula: 3 * sqrt(25 + 10*sqrt(5)) * edge**2.

    The previous version called ``isinstance(edge, edge)`` — passing the value
    as its own type — which raised TypeError for every valid input, and its
    body referenced ``edge`` while the parameter had a placeholder name.
    The type check now runs before the sign check so non-numeric input raises
    ValueError instead of TypeError.

    Raises:
        ValueError: if ``edge`` is not a positive number.
    """
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length.

    Formula: (15 + 7*sqrt(5)) / 4 * edge**3.

    Same fixes as the surface-area function: the broken ``isinstance(edge,
    edge)`` check is replaced with a real numeric-type check (run before the
    sign check), and the parameter is named ``edge`` as the body expects.

    Raises:
        ValueError: if ``edge`` is not a positive number.
    """
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
    # Execute the doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 576 | from collections.abc import Sequence
def max_subsequence_sum(nums=None) -> int:
    """Return the maximum sum over all contiguous non-empty subarrays (Kadane).

    The function is renamed to match its call site in the ``__main__`` block
    below; the previous version bound the parameter and every local to the
    same placeholder name, so ``nums``, ``ans`` and ``num`` were all unbound
    and the update step was scrambled. This is the standard two-accumulator
    Kadane's algorithm: ``current`` is the best sum ending at this element,
    ``best`` is the best sum seen anywhere.

    Raises:
        ValueError: if ``nums`` is ``None`` or empty.
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    best = current = nums[0]
    for num in nums[1:]:
        # Either extend the running subarray or start fresh at this element.
        current = max(num, current + num)
        best = max(best, current)
    return best
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Try on a sample input from the user. The previous version bound both
    # inputs to the same placeholder name, leaving `n` and `array` (which the
    # next lines read) undefined.
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
    print(max_subsequence_sum(array))
| 576 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class lowercase__ ( lowerCAmelCase__ , unittest.TestCase ):
    """Tokenization tests for XLMRobertaTokenizer / XLMRobertaTokenizerFast.

    NOTE(review): obfuscated identifiers survive throughout this class — the
    four class attributes all rebind ``A__`` (originally the two tokenizer
    classes plus two boolean flags), every test method is named ``A_`` (later
    definitions shadow earlier ones), and locals rebind
    ``SCREAMING_SNAKE_CASE__`` while later lines read names such as
    ``tokenizer``, ``vocab_keys`` or ``snake_case_`` that are never bound.
    Code is left byte-identical; only documentation was added.
    """

    A__ : List[str] =XLMRobertaTokenizer
    A__ : List[Any] =XLMRobertaTokenizerFast
    A__ : Dict =True
    A__ : Optional[Any] =True

    def A_ ( self : Tuple ):
        """setUp: build a tokenizer from the SentencePiece fixture and save it."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizer(snake_case_ , keep_accents=snake_case_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def A_ ( self : List[str] ):
        """Check that '<pad>' round-trips with id 1."""
        SCREAMING_SNAKE_CASE__ = '<pad>'
        SCREAMING_SNAKE_CASE__ = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )

    def A_ ( self : Union[str, Any] ):
        """Check the first/last vocab entries and the vocab length (1002)."""
        SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(snake_case_ ) , 1002 )

    def A_ ( self : Any ):
        """Check the reported vocab_size."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1002 )

    def A_ ( self : str ):
        """Full tokenize/convert round-trips against the fixture model."""
        SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizer(snake_case_ , keep_accents=snake_case_ )

        SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('This is a test' )
        self.assertListEqual(snake_case_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(snake_case_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            snake_case_ , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] , )
        SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(snake_case_ )
        self.assertListEqual(
            snake_case_ , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                       ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ] , )

        SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(snake_case_ )
        self.assertListEqual(
            snake_case_ , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ] , )

    def A_ ( self : int ):
        """Check fast/slow save_pretrained parity (legacy and non-legacy)."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        SCREAMING_SNAKE_CASE__ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
                SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )

                SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()

                SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(snake_case_ )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(snake_case_ )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                SCREAMING_SNAKE_CASE__ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(snake_case_ , snake_case_ )

                # Checks everything loads correctly in the same way
                SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(snake_case_ )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(snake_case_ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(snake_case_ , snake_case_ ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(snake_case_ )

                # Save tokenizer rust, legacy_format=True
                SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()

                SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(snake_case_ )

                # Checks it save with the same files
                self.assertSequenceEqual(snake_case_ , snake_case_ )

                # Checks everything loads correctly in the same way
                SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(snake_case_ )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(snake_case_ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(snake_case_ , snake_case_ ) )

                shutil.rmtree(snake_case_ )

                # Save tokenizer rust, legacy_format=False
                SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()

                SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(snake_case_ )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(snake_case_ )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(snake_case_ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(snake_case_ , snake_case_ ) )

                shutil.rmtree(snake_case_ )

    @cached_property
    def A_ ( self : Any ):
        """Lazily load the full pretrained xlm-roberta-base tokenizer."""
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )

    def A_ ( self : Any ):
        """Check the tokenizer survives a pickle round-trip."""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(snake_case_ , f.name )
            SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizer(f.name , keep_accents=snake_case_ )
            SCREAMING_SNAKE_CASE__ = pickle.dumps(snake_case_ )
        pickle.loads(snake_case_ )

    def A_ ( self : str ):
        """Check slow and fast tokenizers agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return

        SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()

        SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'

        SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(snake_case_ )
        SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

        SCREAMING_SNAKE_CASE__ = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
        SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

        SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE__ = tokenizer.encode(snake_case_ )
        SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

    @slow
    def A_ ( self : Any ):
        """Pinned encoding of 'Hello World!' against fairseq's xlmr.base."""
        SCREAMING_SNAKE_CASE__ = 'Hello World!'
        SCREAMING_SNAKE_CASE__ = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(snake_case_ , self.big_tokenizer.encode(snake_case_ ) )

    @slow
    def A_ ( self : int ):
        """Pinned encoding of a long mixed-character string (with <unk>s)."""
        SCREAMING_SNAKE_CASE__ = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        SCREAMING_SNAKE_CASE__ = [
            0,
            3293,
            83,
            10,
            4552,
            4989,
            7986,
            678,
            10,
            5915,
            111,
            179459,
            124850,
            4,
            6044,
            237,
            12,
            6,
            5,
            6,
            4,
            6780,
            705,
            15,
            1388,
            44,
            378,
            10114,
            711,
            152,
            20,
            6,
            5,
            22376,
            642,
            1221,
            15190,
            34153,
            450,
            5608,
            959,
            1119,
            57702,
            136,
            186,
            47,
            1098,
            29367,
            47,
            # 4426, # What fairseq tokenizes from "<unk>": "_<"
            # 3678, # What fairseq tokenizes from "<unk>": "unk"
            # 2740, # What fairseq tokenizes from "<unk>": ">"
            3, # What we tokenize from "<unk>": "<unk>"
            6, # Residue from the tokenization: an extra sentencepiece underline
            4,
            6044,
            237,
            6284,
            50901,
            528,
            31,
            90,
            34,
            927,
            2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(snake_case_ , self.big_tokenizer.encode(snake_case_ ) )

    @slow
    def A_ ( self : Optional[int] ):
        """Integration check of pinned input_ids/attention_mask for the hub model."""
        # fmt: off
        SCREAMING_SNAKE_CASE__ = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 472 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCamelCase_(lowerCAmelCase__):
    """Dataset reader that builds a text-line dataset from local text files.

    The previous version declared every ``__init__`` parameter under the same
    placeholder name (a SyntaxError), never stored the ``Text`` builder on
    ``self`` although the read method uses ``self.builder``, and the read
    method referenced the undefined name ``snake_case_`` instead of its own
    locals. Parameter names are restored to match the ``super().__init__``
    contract of the (imported) abstract reader.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize the input to a {split_name: paths} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def A(self):
        """Materialize and return the dataset (streaming or map-style).

        NOTE(review): this is the reader's ``read()``-style entry point; the
        obfuscated method name ``A`` is kept to avoid breaking callers.
        """
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 639 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Return ("Resonant frequency", f) for an LC circuit: f = 1 / (2*pi*sqrt(L*C)).

    The previous version declared both parameters under the same placeholder
    name (a SyntaxError) while the body read ``inductance`` and
    ``capacitance``; the parameter names are restored from the body, and the
    function is renamed from the obfuscated ``_a`` to its conventional name.

    Raises:
        ValueError: if inductance or capacitance is zero or negative.
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
    # Execute the doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import machinery for the X-CLIP model package. The previous version
# bound the structure dict and the lazy module to a placeholder name while
# referencing `_import_structure` (never defined) and discarding the
# `_LazyModule` instead of installing it into `sys.modules`.
_import_structure = {
    '''configuration_x_clip''': [
        '''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XCLIPConfig''',
        '''XCLIPTextConfig''',
        '''XCLIPVisionConfig''',
    ],
    '''processing_x_clip''': ['''XCLIPProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply do not register the modeling symbols.
    pass
else:
    _import_structure['''modeling_x_clip'''] = [
        '''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XCLIPModel''',
        '''XCLIPPreTrainedModel''',
        '''XCLIPTextModel''',
        '''XCLIPVisionModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 157 | 0 |
from __future__ import annotations
def mean(nums):
    """Return the arithmetic mean of a non-empty sequence of numbers.

    The previous version's body read ``nums`` while the parameter had a
    placeholder name (NameError on every call); the parameter is restored
    from the body, and the obfuscated function name is replaced with the
    conventional one (nothing in this file referenced the old name).

    Raises:
        ValueError: if the sequence is ``None`` or empty.
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
    # Execute the doctest examples embedded in this module's docstrings.
    # (Also removes the stray table-markup residue that was fused onto the
    # testmod() line and made the file unparsable.)
    import doctest

    doctest.testmod()
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
# Module logger.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# NOTE(review): this assignment reuses the same (machine-mangled) name as the
# logger above, so the logger handle is immediately lost.  Upstream these are
# two distinct names (logger vs. the pretrained-config archive map) — confirm
# against the original file before relying on either binding.
_SCREAMING_SNAKE_CASE = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCAmelCase_ ( PretrainedConfig ):
    """Configuration for BlenderbotSmall seq2seq models.

    The original declaration used one mangled name for every __init__
    parameter (a SyntaxError) and inherited an undefined ``__magic_name__``.
    Parameter names are restored to match the attribute assignments in the
    body, and the base class is restored to ``PretrainedConfig``, which this
    file imports.  Defaults are unchanged.
    """

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute names mapped onto this config's encoder-centric names.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class lowerCAmelCase_ ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for BlenderbotSmall.

    NOTE(review): identifiers in this file look machine-mangled — this class
    shared the name ``lowerCAmelCase_`` with the model config above, every
    method was declared with duplicate parameter names (a SyntaxError), and
    dictionary writes were collapsed into rebindings of one local.  The base
    class is restored from this file's imports and method/parameter names are
    restored from the intact call sites inside the class body
    (``self._generate_dummy_inputs_for_..."``, ``super()._flatten_past_key_values_``).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the ONNX input names and their dynamic axes for the current task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                # With a cache, the decoder only consumes the newest token.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the ONNX output names and dynamic axes; adds `present.*` when caching."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            # Bypass the seq2seq override and use the base (non-seq2seq) outputs.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self, tokenizer, batch_size = -1, seq_length = -1, is_pair = False, framework = None,
    ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and zero past_key_values when caching)."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework )
        decoder_inputs = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            # Extend the decoder mask to cover the (dummy) past positions.
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length )], dim=1 )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers )
            max_num_layers = max(num_encoder_layers, num_decoder_layers ) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size = -1, seq_length = -1, is_pair = False, framework = None,
    ) -> Mapping[str, Any]:
        """Build dummy decoder-only inputs (and zero past_key_values when caching)."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype )], dim=1 )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size = -1, seq_length = -1, is_pair = False, framework = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework ) )
        return common_inputs

    def generate_dummy_inputs(
        self, tokenizer, batch_size = -1, seq_length = -1, is_pair = False, framework = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation based on the export task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )

        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t) -> None:
        """Flatten past_key_values using the seq2seq scheme, or the base scheme otherwise."""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t )
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self )._flatten_past_key_values_(
                flattened_output, name, idx, t )
| 18 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class A ( ModelMixin , ConfigMixin ):
    """T5-style decoder stack with FiLM conditioning on a diffusion timestep.

    NOTE(review): class and attribute names here look machine-mangled (the
    class name ``A`` is kept to avoid breaking external references).  The
    original ``self.X = ...`` assignments were collapsed into bare local
    rebindings, so forward() read attributes that were never set; they are
    restored to the names forward() actually reads.  Bases are restored to
    ``ModelMixin, ConfigMixin`` from this file's imports (required by
    ``@register_to_config``).
    """

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2_000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ) -> None:
        super().__init__()

        # MLP turning the timestep embedding into the FiLM conditioning signal
        # (width d_model * 4, matching TaFiLMLayer's in_features).
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False ),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False ),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model )
        # Fixed (non-trained) positional table.
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False )

        self.dropout = nn.Dropout(p=dropout_rate )

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate )
            self.decoders.append(lyr )

        self.decoder_norm = TaLayerNorm(d_model )

        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model, input_dims, bias=False )

    def encoder_decoder_mask(self, query_input: torch.Tensor, key_input: torch.Tensor ) -> torch.Tensor:
        """Outer-product two 0/1 masks into a (…, 1, q_len, k_len) attention mask."""
        mask = torch.mul(query_input.unsqueeze(-1 ), key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time ):
        """Decode continuous inputs conditioned on the noise timestep and encodings.

        Args:
            encodings_and_masks: iterable of (encoding, mask) pairs from the encoders.
            decoder_input_tokens: (batch, seq, input_dims) continuous inputs.
            decoder_noise_time: (batch,) diffusion times in [0, 1).
        """
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype )

        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device ), (batch, seq_length), )

        position_encodings = self.position_encoding(decoder_positions )

        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y )) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1 )

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]

        y = self.decoder_norm(y )
        y = self.post_dropout(y )

        spec_out = self.spec_out(y )
        return spec_out
class DecoderLayer ( nn.Module ):
    """One FiLM-conditioned T5 decoder block: conditional self-attention,
    cross-attention, and a FiLM-conditioned feed-forward sub-layer.

    NOTE(review): declared as ``A`` with duplicate mangled parameters (a
    SyntaxError); the class name is restored from its use in the decoder
    stack above and parameter names from the keyword call sites in the body.
    """

    def __init__(self, d_model: int, d_kv: int, num_heads: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float = 1E-6 ) -> None:
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate ) )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ) )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon ) )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        """Run the three sub-layers; returns a 1-tuple ``(hidden_states,)``."""
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )

        if encoder_hidden_states is not None:
            # Convert the 0/1 encoder mask into an additive attention bias.
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1E1_0 ).to(
                encoder_hidden_states.dtype )

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb )

        return (hidden_states,)
class TaLayerSelfAttentionCond ( nn.Module ):
    """Pre-norm self-attention with optional FiLM conditioning.

    NOTE(review): declared as ``A``; the name is restored from the call site
    in ``DecoderLayer`` above, parameter names from the keyword call sites,
    and ``out_bias``/``scale_qk`` literals (mangled) to ``False`` per the
    attention style used throughout this block.
    """

    def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float ) -> None:
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model )
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None ):
        # pre-attention layer norm
        normed_hidden_states = self.layer_norm(hidden_states )

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb )

        # Self-attention block
        attention_output = self.attention(normed_hidden_states )

        hidden_states = hidden_states + self.dropout(attention_output )

        return hidden_states
class TaLayerCrossAttention ( nn.Module ):
    """Pre-norm cross-attention sub-layer with residual dropout.

    NOTE(review): declared as ``A``; name restored from the call site in
    ``DecoderLayer`` above, parameters from the keyword call sites, and
    ``out_bias``/``scale_qk`` (mangled literals) to ``False``.
    """

    def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float, layer_norm_epsilon: float ) -> None:
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward(self, hidden_states, key_value_states=None, attention_mask=None ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1 ), )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond ( nn.Module ):
    """Pre-norm, optionally FiLM-conditioned gated feed-forward sub-layer.

    NOTE(review): declared as ``A``; name restored from the call site in
    ``DecoderLayer`` above and parameter names from the keyword call sites.
    """

    def __init__(self, d_model: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float ) -> None:
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward(self, hidden_states, conditioning_emb=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb )

        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense ( nn.Module ):
    """T5 gated-GELU feed-forward: (gelu(wi_0(x)) * wi_1(x)) -> dropout -> wo.

    NOTE(review): declared as ``A``; name restored from the call site in
    ``TaLayerFFCond`` above.  The original collapsed the ``self.wi_0`` /
    ``self.wi_1`` attribute assignments into bare locals and mangled both
    forward references to ``wi_a``; they are restored to the standard T5
    gated-act layout (gate branch first, linear branch second).
    """

    def __init__(self, d_model: int, d_ff: int, dropout_rate: float ) -> None:
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False )
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False )
        self.wo = nn.Linear(d_ff, d_model, bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()

    def forward(self, hidden_states ):
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )

        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm ( nn.Module ):
    """T5-style RMS layer norm: scale by 1/sqrt(mean(x^2)), no mean-centering, no bias.

    NOTE(review): declared as ``A``; the name is restored from the call sites
    elsewhere in this file, and the mangled ``keepdim`` literal to ``True``.
    """

    def __init__(self, hidden_size: int, eps: float = 1E-6 ) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps

    def forward(self, hidden_states ):
        # T5 uses a layer_norm which only scales and doesn't shift; variance is
        # computed in float32 for stability regardless of the input dtype.
        variance = hidden_states.to(torch.floataa ).pow(2 ).mean(-1, keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.floataa, torch.bfloataa]:
            hidden_states = hidden_states.to(self.weight.dtype )

        return self.weight * hidden_states
class NewGELUActivation ( nn.Module ):
    """Tanh approximation of GELU, as used in Google BERT / GPT-2.

    NOTE(review): declared as ``A``; the name is restored from the call site
    in ``TaDenseGatedActDense`` above.
    """

    def forward(self, input: torch.Tensor ) -> torch.Tensor:
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(input, 3.0 )) ))
class TaFiLMLayer ( nn.Module ):
    """FiLM layer: predict per-feature (scale, shift) from a conditioning
    embedding and apply ``x * (1 + scale) + shift``.

    NOTE(review): declared as ``A``; the name is restored from the call sites
    in the conditioned sub-layers above, and the mangled ``bias`` literal on
    the projection to ``False``.
    """

    def __init__(self, in_features: int, out_features: int ) -> None:
        super().__init__()
        # Single projection producing both scale and shift (hence * 2).
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False )

    def forward(self, x, conditioning_emb ):
        emb = self.scale_bias(conditioning_emb )
        scale, shift = torch.chunk(emb, 2, -1 )
        x = x * (1 + scale) + shift
        return x
| 559 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
__a : Union[str, Any] = logging.get_logger(__name__)
# NOTE(review): this rebinding reuses the same (machine-mangled) name as the
# logger above, so the logger handle is lost.  Upstream these are distinct
# names (logger vs. pretrained-config archive map) — confirm before use.
__a : List[Any] = {
    """tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
    """tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class A ( PretrainedConfig ):
    """Configuration for Falcon models.

    NOTE(review): identifiers were machine-mangled — the __init__ declared
    one name for every parameter (a SyntaxError), both class attributes and
    both properties shared a single name (shadowing), and the base class was
    undefined.  Parameter names are restored from the attribute assignments
    in the body and the base from this file's ``PretrainedConfig`` import;
    the class name ``A`` is kept to avoid breaking external references.
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed', None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Default KV heads to the full head count (classic multi-head attention).
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )

    @property
    def head_dim(self):
        """Per-head hidden dimension."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """Rotary position embeddings are used whenever ALiBi is disabled."""
        return not self.alibi
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
lowerCamelCase__ = logging.get_logger(__name__)
# NOTE(review): this rebinding reuses the same (machine-mangled) name as the
# logger above, discarding it.  Upstream this second binding is the (empty)
# pretrained-config archive map — confirm against the original file.
lowerCamelCase__ = {}
class _UpperCamelCase ( PretrainedConfig):
    """Configuration for LLaMA models.

    NOTE(review): the original inherited from its own (not-yet-bound) name —
    a guaranteed NameError — and declared duplicate mangled parameter names
    (a SyntaxError).  The base is restored to ``PretrainedConfig`` from this
    file's imports, parameter names from the attribute assignments in the
    body, and the validator's name from the call at the end of __init__.
    """

    __lowerCamelCase = "llama"
    __lowerCamelCase = ["past_key_values"]

    def __init__(
        self,
        vocab_size=3_2_0_0_0,
        hidden_size=4_0_9_6,
        intermediate_size=1_1_0_0_8,
        num_hidden_layers=3_2,
        num_attention_heads=3_2,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_0_4_8,
        initializer_range=0.0_2,
        rms_norm_eps=1E-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: needs `type` in {linear, dynamic} and a float `factor` > 1."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
                F"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("""type""", None )
        rope_scaling_factor = self.rope_scaling.get("""factor""", None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 574 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Any = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def __lowerCamelCase ( idx ):
    """Return the (HF name, original name) weight-map entry for the cls token of stage *idx*.

    Only the last CvT stage carries a cls token in the original checkpoint,
    hence the hard-coded ``stage2.cls_token`` source name.

    Fixes: the parameter had been mangled to ``lowerCamelCase__`` while the body
    read ``idx``, and the list was bound to ``lowercase__`` while ``token`` was
    appended — both NameErrors at call time.
    """
    token = []
    token.append((F"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
    return token
def __lowerCamelCase ( ):
    """Return the (HF name, original name) weight-map entries for the final
    layernorm and the classification head of the CvT checkpoint.

    Fix: the list was bound to ``lowercase__`` while the body appended to the
    undefined name ``head`` — a NameError at call time.
    """
    head = []
    head.append(("layernorm.weight", "norm.weight") )
    head.append(("layernorm.bias", "norm.bias") )
    head.append(("classifier.weight", "head.weight") )
    head.append(("classifier.bias", "head.bias") )
    return head
def __lowerCamelCase ( cvt_model , image_size , cvt_file_name , pytorch_dump_folder ):
    """Convert an original Microsoft CvT checkpoint into the 🤗 Transformers format.

    Args:
        cvt_model: checkpoint name (e.g. ``cvt-13`` / ``cvt-21`` / ``cvt-w24``);
            the depth configuration is inferred from characters ``[4:6]`` of the name.
        image_size: input resolution for the image processor.
        cvt_file_name: path to the original ``.pth`` state dict.
        pytorch_dump_folder: output directory for the converted model and processor.

    Fixes vs. the mangled original: the four parameters shared one name (a
    SyntaxError); every local binding had been collapsed onto ``lowercase__`` so
    ``config``, ``original_weights`` and the converted weight dict were never
    defined; the ``CvtConfig`` kwargs ``idalabel``/``labelaid`` are restored to
    ``id2label``/``label2id``.
    NOTE(review): the helpers ``cls_token``/``embeddings``/``attention``/``final``
    were already referenced by the original body but their defs in this file were
    renamed to ``__lowerCamelCase`` — restore their names too.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000
    repo_id = "huggingface/label-files"
    # Label mapping comes from the shared huggingface/label-files dataset repo.
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset") ) , "r") )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
    # NOTE(review): the original assignment target was destroyed; reconstructed
    # as the processor's shortest-edge size from the upstream conversion script.
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("cpu" ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        # Only stages flagged in config.cls_token carry a cls token.
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    # Each entry maps (hf_name, original_name); copy the tensors across.
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder )
    image_processor.save_pretrained(pytorch_dump_folder )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the CvT checkpoint
    # conversion.
    # NOTE(review): the parser is bound to `lowerCAmelCase__` but then used as
    # `parser`, the parsed namespace is bound to `lowerCAmelCase__` but read as
    # `args`, and `convert_cvt_checkpoint` is not defined under that name in
    # this file — the local bindings look mangled; restore the original names
    # before running this script.
    lowerCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        '''--cvt_model''',
        default='''cvt-w24''',
        type=str,
        help='''Name of the cvt model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--image_size''',
        default=3_8_4,
        type=int,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--cvt_file_name''',
        default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Input Image Size''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    lowerCAmelCase__ = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 496 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Conflict-marker banners wrapped around converted lines that need manual
# attention (the class below reads these by name, so they must not all be
# collapsed onto a single `UpperCAmelCase` binding — that was a NameError).
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
# Expressions that have no automatic datasets equivalent and therefore get the
# highlight banners instead of a mechanical rewrite.
TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]
TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    # NOTE: replacement templates must not escape the quotes — `\'` inside a
    # raw string would leak literal backslashes into the converted source.
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
# Preserve the module's final `UpperCAmelCase` binding for any external code
# that still imports it by the mangled name.
UpperCAmelCase = TO_CONVERT
def UpperCAmelCase_ ( args ):
    """Factory handed to argparse's ``set_defaults(func=...)``: build the convert
    command from the parsed CLI namespace *args*.

    Fix: the original body returned ``ConvertCommand(...)``, a name that does not
    exist in this module — the command class here is ``A_``.
    """
    return A_(args.tfds_path , args.datasets_directory )
class A_ ( __lowerCamelCase ):
    """`datasets-cli convert` command: rewrite a TensorFlow Datasets dataset
    script into a HuggingFace Datasets script.

    NOTE(review): the base class name was mangled to ``__lowerCamelCase`` —
    presumably ``BaseDatasetsCLICommand``; confirm and restore.

    Fixes vs. the mangled original: ``__init__`` had two parameters with the
    same name (a SyntaxError); the argparse arguments used ``type=snake_case,
    required=snake_case`` (the sub-parser object) instead of ``str``/``True``;
    the highlight filter was ``lambda snake_case: e in out_line`` over the wrong
    iterable (NameError on ``e``); the utility move called
    ``shutil.copy(x, x)`` (a self-copy); all path locals were collapsed onto
    ``lowercase``.
    """

    @staticmethod
    def SCREAMING_SNAKE_CASE__ ( parser ):
        """Register the ``convert`` sub-command on *parser* (argparse sub-parsers)."""
        train_parser = parser.add_parser(
            'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
        train_parser.add_argument(
            '--tfds_path' , type=str , required=True , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
        train_parser.add_argument(
            '--datasets_directory' , type=str , required=True , help='Path to the HuggingFace Datasets folder.' )
        # The factory (module-level UpperCAmelCase_) builds the command from args.
        train_parser.set_defaults(func=UpperCAmelCase_ )

    def __init__( self , tfds_path , datasets_directory , *args ):
        """Store the source tfds path and the destination datasets directory."""
        self._logger = get_logger('datasets-cli/converting' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Convert every eligible ``.py`` file under the tfds path and write the
        results (plus any shared utility files) into the datasets directory."""
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        utils_files = []          # converted files that are shared utilities, not builders
        with_manual_update = []   # outputs that still contain highlight banners
        imports_to_builder_map = {}  # utility import name -> builder output dir
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F'''Looking at file {f_name}''' )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('Skipping file' )
                continue
            with open(input_file , encoding='utf-8' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = 'import datasets\n'
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = 'from datasets import logging\n'
                elif "getLogger" in out_line:
                    out_line = out_line.replace('getLogger' , 'get_logger' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # No mechanical rewrite exists: wrap the line in conflict
                    # markers listing the offending expressions.
                    needs_manual_update = True
                    to_remove_references = list(filter(lambda e: e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove_references ) + '\n' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
                    out_line = 'from . import ' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('.py' , '' )
                output_dir = os.path.join(abs_datasets_path , dir_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(F'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , 'w' , encoding='utf-8' ) as f:
                f.writelines(out_lines )
            self._logger.info(F'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 565 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def UpperCAmelCase_ ( model_name ):
    """Build the `ASTConfig` matching the original checkpoint *model_name*.

    The patch strides are encoded in the checkpoint name ("10-10" is the
    library default); speech-commands checkpoints use shorter inputs and a
    different label set.

    Fixes vs. the mangled original: every assignment was collapsed onto
    ``lowercase`` so ``idalabel`` was read but never bound, the dict-comp key
    used ``int(<model name>)`` instead of ``int(k)``, and no config attribute
    was ever set.
    """
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        # Speech-commands clips are shorter than AudioSet ones.
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('Model not supported' )
    repo_id = 'huggingface/label-files'
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = 'speech-commands-v2-id2label.json'
    else:
        config.num_labels = 527
        filename = 'audioset-id2label.json'
    # Label mapping is fetched from the shared huggingface/label-files repo.
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def UpperCAmelCase_ ( name ):
    """Map an original AST checkpoint parameter name to its 🤗 Transformers
    equivalent.

    Fixes vs. the mangled original: the parameter was renamed while the body
    still read ``name`` (NameError), and every ``str.replace`` result was
    discarded into ``lowercase`` so the function returned its input unchanged.
    The ``attn.proj`` rule must run before the bare ``attn`` rule.
    """
    if "module.v" in name:
        name = name.replace('module.v' , 'audio_spectrogram_transformer' )
    if "cls_token" in name:
        name = name.replace('cls_token' , 'embeddings.cls_token' )
    if "dist_token" in name:
        name = name.replace('dist_token' , 'embeddings.distillation_token' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'embeddings.position_embeddings' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    # transformer blocks
    if "blocks" in name:
        name = name.replace('blocks' , 'encoder.layer' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
    if "module.mlp_head.1" in name:
        name = name.replace('module.mlp_head.1' , 'classifier.dense' )
    return name
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    # Split fused qkv checkpoint tensors into separate query/key/value entries
    # and rebuild the state dict with Transformers-style key names.
    # NOTE(review): the two parameters share one name, which is a SyntaxError in
    # Python — presumably (orig_state_dict, config); confirm and rename.
    for key in orig_state_dict.copy().keys():
        # NOTE(review): every binding below targets `lowercase`, yet the names
        # read afterwards (`orig_state_dict`, `key_split`, `val`, `dim`) are
        # never defined — the original assignments look destroyed. The popped
        # value also never lands in any new key, so nothing is written back.
        lowercase = orig_state_dict.pop(__SCREAMING_SNAKE_CASE )
        if "qkv" in key:
            # Fused attention tensor: key format is
            # module.v.blocks.<layer_num>.attn.qkv.{weight,bias}.
            lowercase = key.split('.' )
            lowercase = int(key_split[3] )
            lowercase = config.hidden_size
            if "weight" in key:
                # Weight is (3*dim, dim): slice rows into q, k, v.
                lowercase = val[:dim, :]
                lowercase = val[dim : dim * 2, :]
                lowercase = val[-dim:, :]
            else:
                # Bias is (3*dim,): same three-way split.
                lowercase = val[:dim]
                lowercase = val[dim : dim * 2]
                lowercase = val[-dim:]
        else:
            # Non-qkv keys are presumably renamed via the rename helper above —
            # TODO confirm against the upstream conversion script.
            lowercase = val
    return orig_state_dict
def UpperCAmelCase_ ( state_dict ):
    """Drop the original classification-head keys from *state_dict* in place.

    Missing keys are ignored (``pop`` default of ``None``). Fix: the mangled
    original called ``state_dict.pop(__SCREAMING_SNAKE_CASE, ...)`` — popping an
    undefined name instead of each ignore key ``k``.
    """
    ignore_keys = [
        'module.v.head.weight',
        'module.v.head.bias',
        'module.v.head_dist.weight',
        'module.v.head_dist.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def UpperCAmelCase_ ( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """Download an original AST checkpoint, convert it to the 🤗 Transformers
    format, verify its logits on a sample input, and optionally save/push it.

    Fixes vs. the mangled original: the three parameters shared one name (a
    SyntaxError) and every local binding was collapsed onto ``lowercase``.
    NOTE(review): the helpers called below
    (``get_audio_spectrogram_transformer_config``, ``remove_keys``,
    ``convert_state_dict``) are referenced by their upstream names, but their
    defs in this file were all renamed to ``UpperCAmelCase_`` — restore those
    names too.
    """
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
        'ast-finetuned-audioset-10-10-0.4593': (
            'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
        ),
        'ast-finetuned-audioset-10-10-0.450': (
            'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
        ),
        'ast-finetuned-audioset-10-10-0.448': (
            'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
        ),
        'ast-finetuned-audioset-10-10-0.448-v2': (
            'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
        ),
        'ast-finetuned-audioset-12-12-0.447': (
            'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
        ),
        'ast-finetuned-audioset-14-14-0.443': (
            'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
        ),
        'ast-finetuned-audioset-16-16-0.442': (
            'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
        ),
        'ast-finetuned-speech-commands-v2': (
            'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
        ),
    }
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if 'speech-commands' not in model_name else -6.845978
    std = 4.5689974 if 'speech-commands' not in model_name else 5.5654526
    max_length = 1024 if 'speech-commands' not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset('speech_commands' , 'v0.02' , split='validation' )
        waveform = dataset[0]['audio']['array']
    else:
        filepath = hf_hub_download(
            repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
        waveform , _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=16000 , return_tensors='pt' )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
    # Expected logit slices recorded from the original implementation.
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984] )
    else:
        raise ValueError('Unknown model name' )
    if not torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ):
        raise ValueError('Logits don\'t match' )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing model and feature extractor to the hub...' )
        model.push_to_hub(F'''MIT/{model_name}''' )
        feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 565 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.