code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
'''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'''
'''position_salaries.csv'''
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def lowerCamelCase_()-> str:
plt.scatter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , color="""red""" )
plt.plot(__SCREAMING_SNAKE_CASE , pol_reg.predict(poly_reg.fit_transform(__SCREAMING_SNAKE_CASE ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 338 | """simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
lowerCAmelCase_ = '''path-to-your-trained-model'''
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
lowerCAmelCase_ = '''A photo of sks dog in a bucket'''
lowerCAmelCase_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 338 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCamelCase : Optional[int] = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
_lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 308 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : Optional[int] = 'git_vision_model'
def __init__( self : Optional[Any] , snake_case : Any=768 , snake_case : List[str]=3072 , snake_case : Optional[Any]=12 , snake_case : Optional[Any]=12 , snake_case : Tuple=3 , snake_case : str=224 , snake_case : Tuple=16 , snake_case : Union[str, Any]="quick_gelu" , snake_case : Dict=1E-5 , snake_case : int=0.0 , snake_case : Union[str, Any]=0.02 , **snake_case : int , ):
'''simple docstring'''
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[Any] = image_size
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE : str = hidden_act
@classmethod
def lowerCamelCase_ ( cls : Optional[int] , snake_case : Union[str, os.PathLike] , **snake_case : List[Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = cls.get_config_dict(snake_case , **snake_case )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
SCREAMING_SNAKE_CASE : Optional[int] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(snake_case , **snake_case )
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : int = 'git'
def __init__( self : Union[str, Any] , snake_case : str=None , snake_case : List[str]=30522 , snake_case : Optional[Any]=768 , snake_case : Optional[Any]=6 , snake_case : Union[str, Any]=12 , snake_case : Union[str, Any]=3072 , snake_case : Dict="gelu" , snake_case : Optional[Any]=0.1 , snake_case : Optional[Any]=0.1 , snake_case : str=1024 , snake_case : Tuple=0.02 , snake_case : Dict=1E-12 , snake_case : List[str]=0 , snake_case : Optional[int]="absolute" , snake_case : Optional[int]=True , snake_case : Optional[int]=False , snake_case : Optional[Any]=101 , snake_case : Optional[int]=102 , snake_case : int=None , **snake_case : Any , ):
'''simple docstring'''
super().__init__(bos_token_id=snake_case , eos_token_id=snake_case , pad_token_id=snake_case , **snake_case )
if vision_config is None:
SCREAMING_SNAKE_CASE : List[Any] = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = GitVisionConfig(**snake_case )
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Any = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : int = tie_word_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = num_image_with_embedding
SCREAMING_SNAKE_CASE : Tuple = bos_token_id
SCREAMING_SNAKE_CASE : Union[str, Any] = eos_token_id
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : int = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : Dict = self.__class__.model_type
return output | 308 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''' )
# array bounds provided by analysis
lowercase__ : Optional[int] = [
20_47,
1_37_36_53,
25_32_60_01,
32_15_03_17_51,
2_15_23_02_89_87_47,
3_47_47_49_66_03_83,
3_41_55_00_71_72_83_21,
1,
3_82_51_23_05_65_46_41_30_51,
1,
1,
31_86_65_85_78_34_03_11_51_16_74_61,
3_31_70_44_06_46_79_88_73_85_96_19_81,
]
lowercase__ : Optional[int] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(__lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
lowercase__ : int = primes[:idx]
break
lowercase__ , lowercase__ : Tuple = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowercase__ : Optional[int] = False
for r in range(__lowerCamelCase ):
lowercase__ : str = pow(__lowerCamelCase , d * 2**r , __lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowercase__ : Dict = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def __UpperCAmelCase ( ) -> None:
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 560 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = "dpt"
def __init__( self : List[str] ,_snake_case : Union[str, Any]=768 ,_snake_case : int=12 ,_snake_case : int=12 ,_snake_case : List[str]=3_072 ,_snake_case : List[str]="gelu" ,_snake_case : str=0.0 ,_snake_case : int=0.0 ,_snake_case : Optional[Any]=0.02 ,_snake_case : Any=1e-12 ,_snake_case : Tuple=384 ,_snake_case : int=16 ,_snake_case : Tuple=3 ,_snake_case : Optional[int]=False ,_snake_case : int=True ,_snake_case : Optional[int]=[2, 5, 8, 11] ,_snake_case : List[str]="project" ,_snake_case : Any=[4, 2, 1, 0.5] ,_snake_case : Union[str, Any]=[96, 192, 384, 768] ,_snake_case : List[str]=256 ,_snake_case : int=-1 ,_snake_case : Any=False ,_snake_case : List[Any]=True ,_snake_case : Tuple=0.4 ,_snake_case : int=255 ,_snake_case : Dict=0.1 ,_snake_case : Dict=[1, 1_024, 24, 24] ,_snake_case : Optional[Any]=[0, 1] ,_snake_case : List[str]=None ,**_snake_case : Optional[int] ,) -> Optional[int]:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : Union[str, Any] = hidden_size
lowercase__ : Union[str, Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
lowercase__ : Union[str, Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
lowercase__ : Dict = BitConfig(**_snake_case )
elif isinstance(_snake_case ,_snake_case ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
lowercase__ : Tuple = BitConfig(**_snake_case )
elif isinstance(_snake_case ,_snake_case ):
lowercase__ : Optional[int] = backbone_config
else:
raise ValueError(
f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
lowercase__ : Optional[Any] = backbone_featmap_shape
lowercase__ : Tuple = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
lowercase__ : List[str] = None
lowercase__ : Any = None
lowercase__ : Dict = []
lowercase__ : str = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Any = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : Optional[int] = initializer_range
lowercase__ : List[str] = layer_norm_eps
lowercase__ : Optional[Any] = image_size
lowercase__ : Any = patch_size
lowercase__ : Any = num_channels
lowercase__ : Optional[Any] = qkv_bias
lowercase__ : Any = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
lowercase__ : str = readout_type
lowercase__ : Union[str, Any] = reassemble_factors
lowercase__ : int = neck_hidden_sizes
lowercase__ : List[str] = fusion_hidden_size
lowercase__ : Optional[int] = head_in_index
lowercase__ : Dict = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowercase__ : Tuple = use_auxiliary_head
lowercase__ : List[str] = auxiliary_loss_weight
lowercase__ : Tuple = semantic_loss_ignore_index
lowercase__ : Tuple = semantic_classifier_dropout
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowercase__ : List[Any] = self.backbone_config.to_dict()
lowercase__ : List[str] = self.__class__.model_type
return output
| 560 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _lowerCamelCase ( lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase_ : Any = os.path.join(args.tf_model_dir , 'parameters.json' )
UpperCAmelCase_ : Optional[Any] = json.loads(open(lowerCamelCase_ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
UpperCAmelCase_ : List[Any] = args.output + '.pt'
UpperCAmelCase_ : List[str] = OrderedDict()
with tf.device('/CPU:0' ):
UpperCAmelCase_ : Tuple = tf.train.load_checkpoint(args.tf_model_dir )
UpperCAmelCase_ : str = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
UpperCAmelCase_ : Any = reader.get_tensor(lowerCamelCase_ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
UpperCAmelCase_ : Dict = int(key_name[9] )
elif key_name.startswith('pasts/out' ):
UpperCAmelCase_ : Optional[Any] = 8
UpperCAmelCase_ : Optional[int] = 'model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
UpperCAmelCase_ : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : List[Any] = torch.tensor(lowerCamelCase_ )
elif key_name.startswith('model/moe' ):
UpperCAmelCase_ : Optional[Any] = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
UpperCAmelCase_ : List[str] = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
UpperCAmelCase_ : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
elif key_name.endswith('/softmlp/kernel' ):
UpperCAmelCase_ : List[Any] = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
UpperCAmelCase_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
UpperCAmelCase_ : Dict = key_name[-9:-7]
for i in range(16 ):
UpperCAmelCase_ : List[str] = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
UpperCAmelCase_ : List[str] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
elif key_name.startswith('model/mlp' ):
UpperCAmelCase_ : str = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
UpperCAmelCase_ : int = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player
UpperCAmelCase_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
elif key_name.endswith('/p1/bias' ):
UpperCAmelCase_ : str = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player
UpperCAmelCase_ : Union[str, Any] = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : int = torch.tensor(lowerCamelCase_ )
elif key_name.endswith('/p2/kernel' ):
UpperCAmelCase_ : Optional[int] = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player
UpperCAmelCase_ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ )
elif key_name.endswith('/p2/bias' ):
UpperCAmelCase_ : List[Any] = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player
UpperCAmelCase_ : List[Any] = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ )
elif key_name.startswith('model/ln' ):
UpperCAmelCase_ : List[str] = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
UpperCAmelCase_ : str = 'model.blocks.%d.feed_forward.norm.bias' % player
UpperCAmelCase_ : Dict = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ )
elif key_name.endswith('/g' ):
UpperCAmelCase_ : Any = 'model.blocks.%d.feed_forward.norm.weight' % player
UpperCAmelCase_ : Tuple = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : Any = torch.tensor(lowerCamelCase_ )
elif key_name.startswith('model/att' ):
UpperCAmelCase_ : Union[str, Any] = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
UpperCAmelCase_ : Dict = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
UpperCAmelCase_ : Optional[int] = state[:, 0, :, :]
UpperCAmelCase_ : Dict = state[:, 1, :, :]
UpperCAmelCase_ : Tuple = state[:, 2, :, :]
UpperCAmelCase_ : List[Any] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : List[str] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : str = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Union[str, Any] = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
UpperCAmelCase_ : Any = torch.tensor(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
elif key_name.endswith('/o/kernel' ):
UpperCAmelCase_ : List[Any] = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
UpperCAmelCase_ : Dict = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : List[Any] = torch.tensor(lowerCamelCase_ )
elif key_name.startswith('model/an' ):
UpperCAmelCase_ : Optional[int] = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
UpperCAmelCase_ : Any = 'model.blocks.%d.self_attn.norm.bias' % player
UpperCAmelCase_ : List[Any] = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : Dict = torch.tensor(lowerCamelCase_ )
elif key_name.endswith('/g' ):
UpperCAmelCase_ : Optional[int] = 'model.blocks.%d.self_attn.norm.weight' % player
UpperCAmelCase_ : Tuple = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : List[Any] = torch.tensor(lowerCamelCase_ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
UpperCAmelCase_ : Optional[int] = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
UpperCAmelCase_ : int = 'model.%s.weight' % nlayer
UpperCAmelCase_ : Optional[Any] = vnp.copy() # same in embedded
UpperCAmelCase_ : int = torch.tensor(lowerCamelCase_ )
if key_name.startswith('model/wte' ):
UpperCAmelCase_ : str = 'lm_head.weight'
UpperCAmelCase_ : int = vnp.copy() # same in embedded
UpperCAmelCase_ : Dict = torch.tensor(lowerCamelCase_ )
elif key_name.startswith('model/wob' ):
UpperCAmelCase_ : str = 'final_logits_bias'
UpperCAmelCase_ : List[str] = vnp.copy() # same in embedded
UpperCAmelCase_ : int = state.reshape((1, -1) )
UpperCAmelCase_ : int = torch.tensor(lowerCamelCase_ )
elif key_name == "model/dense/kernel":
UpperCAmelCase_ : Union[str, Any] = 'model.last_project.weight'
UpperCAmelCase_ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ )
elif key_name == "model/dense_1/bias":
UpperCAmelCase_ : Any = 'model.last_project.bias'
UpperCAmelCase_ : Optional[Any] = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ : List[Any] = torch.tensor(lowerCamelCase_ )
torch.save(lowerCamelCase_ , args.output )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
snake_case__ : Optional[int] = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 712 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Dict = logging.get_logger(__name__)
snake_case__ : int = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase_ :Optional[int] = '''mgp-str'''
def __init__( self , snake_case_=[3_2, 1_2_8] , snake_case_=4 , snake_case_=3 , snake_case_=2_7 , snake_case_=3_8 , snake_case_=5_0_2_5_7 , snake_case_=3_0_5_2_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=4.0 , snake_case_=True , snake_case_=False , snake_case_=1E-5 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=False , snake_case_=0.02 , **snake_case_ , ):
'''simple docstring'''
super().__init__(**snake_case_ )
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Any = patch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : int = max_token_length
UpperCAmelCase_ : Union[str, Any] = num_character_labels
UpperCAmelCase_ : Union[str, Any] = num_bpe_labels
UpperCAmelCase_ : Optional[int] = num_wordpiece_labels
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = mlp_ratio
UpperCAmelCase_ : Any = distilled
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[Any] = drop_rate
UpperCAmelCase_ : Optional[Any] = qkv_bias
UpperCAmelCase_ : List[str] = attn_drop_rate
UpperCAmelCase_ : Optional[int] = drop_path_rate
UpperCAmelCase_ : List[Any] = output_aa_attentions
UpperCAmelCase_ : Optional[int] = initializer_range
| 389 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''time_series_transformer'''
_lowercase : List[str] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: str , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "student_t" , UpperCamelCase_: str = "nll" , UpperCamelCase_: int = 1 , UpperCamelCase_: List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCamelCase_: Optional[Union[str, bool]] = "mean" , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: int = 0 , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 32 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , UpperCamelCase_: bool = True , UpperCamelCase_: str = "gelu" , UpperCamelCase_: int = 64 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: float = 0.1 , UpperCamelCase_: int = 100 , UpperCamelCase_: float = 0.02 , UpperCamelCase_: List[Any]=True , **UpperCamelCase_: List[str] , ) -> str:
"""simple docstring"""
lowercase__ = prediction_length
lowercase__ = context_length or prediction_length
lowercase__ = distribution_output
lowercase__ = loss
lowercase__ = input_size
lowercase__ = num_time_features
lowercase__ = lags_sequence
lowercase__ = scaling
lowercase__ = num_dynamic_real_features
lowercase__ = num_static_real_features
lowercase__ = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
lowercase__ = cardinality
else:
lowercase__ = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
lowercase__ = embedding_dimension
else:
lowercase__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ = num_parallel_samples
# Transformer architecture configuration
lowercase__ = input_size * len(UpperCamelCase_ ) + self._number_of_features
lowercase__ = d_model
lowercase__ = encoder_attention_heads
lowercase__ = decoder_attention_heads
lowercase__ = encoder_ffn_dim
lowercase__ = decoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = decoder_layers
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = use_cache
super().__init__(is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: Optional[int] ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 43 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : List[str] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Optional[int] = '''lxmert'''
UpperCAmelCase__ : Any = {}
def __init__( self :Dict ,__snake_case :Optional[Any]=3_05_22 ,__snake_case :int=7_68 ,__snake_case :int=12 ,__snake_case :Any=95_00 ,__snake_case :Union[str, Any]=16_00 ,__snake_case :str=4_00 ,__snake_case :Optional[Any]=30_72 ,__snake_case :List[str]="gelu" ,__snake_case :Union[str, Any]=0.1 ,__snake_case :Union[str, Any]=0.1 ,__snake_case :Dict=5_12 ,__snake_case :str=2 ,__snake_case :List[str]=0.02 ,__snake_case :Optional[int]=1E-12 ,__snake_case :Any=9 ,__snake_case :List[str]=5 ,__snake_case :Optional[Any]=5 ,__snake_case :str=20_48 ,__snake_case :Optional[Any]=4 ,__snake_case :str=6.67 ,__snake_case :Union[str, Any]=True ,__snake_case :str=True ,__snake_case :int=True ,__snake_case :List[str]=True ,__snake_case :List[Any]=True ,__snake_case :str=True ,__snake_case :List[str]=True ,**__snake_case :Optional[Any] ,) -> str:
a__ = vocab_size
a__ = hidden_size
a__ = num_attention_heads
a__ = hidden_act
a__ = intermediate_size
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = initializer_range
a__ = layer_norm_eps
a__ = num_qa_labels
a__ = num_object_labels
a__ = num_attr_labels
a__ = l_layers
a__ = x_layers
a__ = r_layers
a__ = visual_feat_dim
a__ = visual_pos_dim
a__ = visual_loss_normalizer
a__ = task_matched
a__ = task_mask_lm
a__ = task_obj_predict
a__ = task_qa
a__ = visual_obj_loss
a__ = visual_attr_loss
a__ = visual_feat_loss
a__ = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**__snake_case )
| 335 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Make all RNG/cuDNN behavior deterministic so the image-slice assertions
# below are reproducible across runs.
enable_full_determinism()
class A_ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast (tiny-model, CPU) tests for `StableDiffusionInpaintPipeline`.

    NOTE(review): the original block named every method identically (so only
    the last survived) and bound every local to the same throwaway name while
    later lines referenced `unet`, `image`, etc. — a guaranteed NameError.
    Names were reconstructed from those surviving references; the mixin order
    and the `True`/`None` literals restored below should be confirmed against
    the upstream diffusers test.
    """

    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build the smallest possible set of pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,  # NOTE(review): literal reconstructed — confirm
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        # MPS does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """Slow GPU integration tests against the real SD2 inpainting checkpoint.

    NOTE(review): method and local names were reconstructed from the
    references that survived scrambling (`super().tearDown()`, `init_image`,
    `pipe`, ...); `torch.floataa` was restored to `torch.float16`.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9E-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,  # original `torch.floataa` does not exist
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # fp16 tolerance is deliberately loose.
        assert np.abs(expected_image - image).max() < 5E-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 706 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
# Help text for the `accelerate config default` subcommand (used as the
# argparse sub-parser description below).
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location=default_json_config_file, use_xpu=False):
    """Create and save a minimal Accelerate cluster config file.

    Args:
        mixed_precision: one of "no", "fp16", "bf16", "fp8".
        save_location: path the JSON config is written to.
        use_xpu: whether XPU devices may be used when available.

    Returns:
        The path the config was written to, or ``False`` if a config already
        existed at ``save_location`` (nothing is overridden in that case).

    Raises:
        ValueError: if ``mixed_precision`` is not a recognised mode.

    NOTE(review): the original declared every parameter with the same name
    (a SyntaxError) and dropped every computed value into a throwaway local;
    names below were reconstructed from the values and messages.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."""
        )
        return False

    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"""
        )

    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    # Pick the distributed setup from whatever accelerator hardware is present.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        # CPU-only fallback.
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"

    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    """Register the `default` subcommand on the `accelerate config` parser.

    NOTE(review): the original declared both parameters with the same name
    (a SyntaxError); names were reconstructed from argparse usage.
    """
    parser = parser.add_parser("default", parents=parents, help=lowercase__, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,  # NOTE(review): default reconstructed — confirm
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    """Entry point for `accelerate config default`: write the basic config and report where it went."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"""accelerate configuration saved at {config_file}""")
| 695 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    """Return ``x.sum()`` — works for NumPy arrays and anything with a ``.sum()`` method.

    NOTE(review): renamed from `A` (several later definitions clobbered the
    name) and the parameter renamed to match the body, which referenced `x`.
    """
    return x.sum()
def add_one(i):  # picklable for multiprocessing
    """Return ``i + 1``.

    NOTE(review): renamed from `A` (clobbered by later definitions) and the
    parameter renamed to match the body, which referenced `i`.
    """
    return i + 1
@dataclass
class A:
    """Simple dataclass exercised by the ``asdict`` tests below
    (they construct it as ``A(x=1, y="foobar")``, which grounds both the
    class name and the two field names).

    NOTE(review): the original declared both fields with the same name, so
    only one survived.
    """

    # integer payload
    x: int
    # string payload
    y: str
class lowerCAmelCase_ ( lowercase ):
    """Unit tests for `map_nested`, `zip_dict` and `temporary_assignment`.

    NOTE(review): this class is damaged by identifier scrambling — the base
    class `lowercase` is undefined (presumably `unittest.TestCase`), all three
    methods share the name `__a` (only the last survives), every local is
    bound to `UpperCamelCase__` while the assertions reference the undefined
    name `lowerCamelCase__`, and `Foo`/`foo`/`expected_map_nested_sna_int`
    are never defined. Left byte-identical pending a careful reconstruction
    against the upstream datasets test file.
    """

    def __a ( self :Union[str, Any] ):
        # NOTE(review): the first 8 literals look like inputs and the next 8
        # like the expected outputs of mapping (+1) over nested structures.
        UpperCamelCase__ :List[str] = {}
        UpperCamelCase__ :Dict = []
        UpperCamelCase__ :List[Any] = 1
        UpperCamelCase__ :Optional[Any] = [1, 2]
        UpperCamelCase__ :Dict = {"""a""": 1, """b""": 2}
        UpperCamelCase__ :Dict = {"""a""": [1, 2], """b""": [3, 4]}
        UpperCamelCase__ :List[Any] = {"""a""": {"""1""": 1}, """b""": 2}
        UpperCamelCase__ :Optional[Any] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
        UpperCamelCase__ :Union[str, Any] = {}
        UpperCamelCase__ :Tuple = []
        UpperCamelCase__ :Dict = 2
        UpperCamelCase__ :Optional[Any] = [2, 3]
        UpperCamelCase__ :Dict = {"""a""": 2, """b""": 3}
        UpperCamelCase__ :int = {"""a""": [2, 3], """b""": [4, 5]}
        UpperCamelCase__ :Optional[int] = {"""a""": {"""1""": 2}, """b""": 3}
        UpperCamelCase__ :Union[str, Any] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
        # NOTE(review): `lowerCamelCase__` is undefined — these calls raise
        # NameError as written.
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
        # Same checks with a multiprocessing pool (num_proc=2).
        UpperCamelCase__ :Dict = 2
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
        # NumPy leaves: `map_numpy` controls whether arrays are mapped too.
        UpperCamelCase__ :int = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
        UpperCamelCase__ :int = {"""a""": 2, """b""": 0, """c""": 2}
        UpperCamelCase__ :Optional[int] = {
            """a""": np.eye(2 ).astype(lowerCamelCase__ ),
            """b""": np.zeros(3 ).astype(lowerCamelCase__ ),
            """c""": np.ones(2 ).astype(lowerCamelCase__ ),
        }
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ , num_proc=lowerCamelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(lowerCamelCase__ ): # can't pickle a local lambda
            map_nested(lambda lowerCamelCase__ : x + 1 , lowerCamelCase__ , num_proc=lowerCamelCase__ )

    def __a ( self :Tuple ):
        # zip_dict should pair values key-wise across the three dicts.
        UpperCamelCase__ :List[str] = {"""a""": 1, """b""": 2}
        UpperCamelCase__ :Tuple = {"""a""": 3, """b""": 4}
        UpperCamelCase__ :Optional[Any] = {"""a""": 5, """b""": 6}
        UpperCamelCase__ :Union[str, Any] = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) ) , lowerCamelCase__ )

    def __a ( self :Optional[Any] ):
        # temporary_assignment should restore the attribute on context exit.
        class lowerCAmelCase_ :
            """Fixture class with a single attribute.

            NOTE(review): referenced below as `Foo`, which is undefined.
            """

            _snake_case : List[str] = """bar"""

        UpperCamelCase__ :List[str] = Foo()
        self.assertEqual(foo.my_attr , """bar""" )
        with temporary_assignment(lowerCamelCase__ , """my_attr""" , """BAR""" ):
            self.assertEqual(foo.my_attr , """BAR""" )
        self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    """`map_nested` must only fan out to a pool above `parallel_min_length`,
    and must cap the worker count at the iterable length.

    NOTE(review): parameter/local names reconstructed — the scrambled source
    referenced the undefined `lowercase__` everywhere.
    """
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    """Tests for `temp_seed`: the same seed must reproduce identical random
    outputs, and code outside the context must stay nondeterministic.

    NOTE(review): the original named all three methods identically (only the
    last survived), subclassed the undefined name `lowercase`, and passed the
    undefined `lowerCamelCase__` as every argument; reconstructed accordingly.
    """

    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        # Same seed twice -> identical outputs; unseeded third call differs.
        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_pytorch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    """`NestedDataStructure.data` must return the wrapped object unchanged."""
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def test_flatten(data, expected_output):
    """`NestedDataStructure.flatten` must yield every leaf of the structure."""
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    """`asdict` must recurse into dataclasses nested in dicts/lists and
    reject non-dataclass top-level inputs."""
    obj = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(obj) == expected_output

    obj = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(obj) == expected_output

    # NOTE(review): the original passed the undefined `lowercase__` here; a
    # list of mixed content should raise TypeError — confirm the exact type.
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str) -> list:
    """Split *text* on whitespace (worker function for the iflatmap test below,
    which already referenced it by this name)."""
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content):
    """Yield (timestamp, content) twice with a 2 s pause in between — lets the
    iflatmap test verify items are delivered as soon as they are produced.
    (Name grounded by the existing reference in test_iflatmap_unordered.)"""
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    """`iflatmap_unordered` must flatten the workers' outputs and yield each
    item immediately, with both stdlib and pathos multiprocess pools."""
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module-level assignments originally used the same name,
# so the archive map clobbered the logger; the logger is given its own name.
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint id -> config URL.
__A = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
    """Configuration for RoBERTa-PreLayerNorm models.

    NOTE(review): the original declared ``__init__`` with one parameter name
    repeated for every argument (a SyntaxError) and assigned each value to a
    throwaway local; parameter and attribute names below were reconstructed
    from the right-hand sides of those assignments.
    """

    # model-type string (the original attribute name was scrambled; restored
    # to the PretrainedConfig convention).
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( _SCREAMING_SNAKE_CASE):
    """ONNX-export configuration: declares the model's dynamic input axes."""

    @property
    def __lowercase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis.
        # NOTE(review): the original bound the axis dict to a throwaway local
        # and then referenced the undefined name `dynamic_axis` — fixed here.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 275 | 0 |
from __future__ import annotations
# Coulomb's constant k_e (the body below already referenced it by this name).
COULOMBS_CONSTANT = 8.988e9  # units = N * m^s * C^-2


def couloumbs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for the single unknown quantity.

    Exactly one of the four arguments must be 0; that quantity is computed
    from the other three and returned as a one-entry dict keyed by its name.

    Raises:
        ValueError: if not exactly one argument is 0, or if distance < 0.

    NOTE(review): the original declared all four parameters with the same
    name (a SyntaxError) and multiplied one charge by itself; the return keys
    "charge1"/"charge2" ground the parameter names used here.
    """
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 713 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
# Module logger — the mapping helpers below already reference it as `logger`,
# but the original bound it to an unrelated scrambled name (NameError at use).
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    """Holds the name of the joblib backend selected via `parallel_backend`.

    ``None`` means "use the local multiprocessing pool". The functions below
    already read `ParallelBackendConfig.backend_name`, which grounds the name.
    """

    # current backend name, or None for local multiprocessing
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Dispatch a nested map either to a local multiprocessing pool or to the
    currently selected joblib backend.

    NOTE(review): the original declared all seven parameters with one repeated
    name (a SyntaxError); names reconstructed from the helper signatures.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split `iterable` into contiguous slices and map them across `num_proc`
    worker processes; returns the flattened list of results."""
    # Never spawn more workers than there are items.
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        # The first `mod` slices get one extra item each.
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    # Share tqdm's lock with the workers so progress bars don't interleave.
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over `iterable` on the joblib backend
    currently registered in `ParallelBackendConfig`."""
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager selecting the joblib backend used by `parallel_map`;
    the selection is cleared again on exit (even on error)."""
    # NOTE(review): the original dropped these into throwaway locals; the
    # reads of `ParallelBackendConfig.backend_name` elsewhere ground the fix.
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 311 | 0 |
def bfs(graph, s, t, parent):
    """Breadth-first search over the residual capacity matrix `graph`.

    Fills `parent` with the BFS tree (parent[v] = predecessor of v) and
    returns True iff sink `t` is reachable from source `s` through edges of
    positive residual capacity. (Name grounded by the `bfs(...)` call in
    the Ford-Fulkerson loop below; the original had duplicate parameter
    names and discarded the visited/parent updates into throwaway locals.)
    """
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from `source` to `sink` using the
    Edmonds-Karp variant of Ford-Fulkerson (BFS augmenting paths).

    Mutates `graph` in place (residual capacities). Name grounded by the
    `ford_fulkerson(graph, source, sink)` call at the bottom of the module;
    the original had duplicate parameter names and never updated `parent`.
    """
    # This array is filled by BFS and to store path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        # Update the residual capacities along the augmenting path.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
# Example capacity matrix (graph[u][v] = capacity of edge u -> v, 0 = no edge)
# for the classic 6-node max-flow demonstration network.
# NOTE(review): the original bound these to scrambled names while the print
# below referenced `graph`, `source` and `sink` — a NameError; names restored
# from that call site.
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 47 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the BridgeTower model family: keys are submodule
# names, values the public symbols they export.  Submodules are imported only
# on first attribute access (via _LazyModule), keeping `import transformers`
# cheap.
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

# The image processor requires the vision extras (PIL); register only if available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

# The model classes require torch; register only if available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules
    # on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 47 | 1 |
from __future__ import annotations
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
if b == 0:
return (1, 0)
((__lowercase) , (__lowercase)) = extended_euclid(lowercase , a % b )
__lowercase = a // b
return (y, x - k * y)
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(lowercase , lowercase )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(lowercase , lowercase )
if b < 0:
__lowercase = (b % n + n) % n
return b
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
__lowercase , __lowercase = invert_modulo(lowercase , lowercase ), invert_modulo(lowercase , lowercase )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
    from doctest import testmod

    # Run the module's doctests once per labelled component — same four
    # verbose calls as before, expressed as a loop.
    for label in (
        "chinese_remainder_theorem",
        "chinese_remainder_theorem2",
        "invert_modulo",
        "extended_euclid",
    ):
        testmod(name=label, verbose=True)
from typing import Any
def UpperCAmelCase ( lowercase ):
    """Shuffle the sequence in place with len(data) random pair swaps; return it.

    NOTE(review): despite the surrounding "Fisher-Yates" labels, this is the
    naive random-swap shuffle (two independent random indices per step), not
    the unbiased Fisher-Yates algorithm — kept as-is to preserve behavior.
    """
    data = lowercase
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    # Small demo: shuffle a list of ints and a list of strings.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    # ``UpperCAmelCase`` is the shuffle function defined above.
    print("FY Shuffle", UpperCAmelCase(integers), UpperCAmelCase(strings))
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
# Name under which the tiny model is saved (and later uploaded).
mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    # The tokenizer must be created while the temp vocab/merges files exist.
    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1_000,
    tgt_vocab_size=1_000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test that the tiny model runs a forward pass.
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 491 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# SHA of the commit where the current branch forked from main.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# Files modified since the fork point (tracked files only).
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
# Restrict to .py files under the top-level dirs given on the command line.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed by Makefile commands.
print(" ".join(relevant_modified_files), end="")
| 491 | 1 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
# WGS84 reference-ellipsoid parameters (meters).
AXIS_A = 637_8137.0  # equatorial (semi-major) axis
AXIS_B = 635_6752.31_4245  # polar (semi-minor) axis
RADIUS = 6378137  # mean radius used for the great-circle step
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
__a : Tuple = (AXIS_A - AXIS_B) / AXIS_A
__a : str = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )
__a : Tuple = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )
__a : Dict = radians(_lowerCamelCase )
__a : Any = radians(_lowerCamelCase )
# Equation
__a : Optional[int] = sin((phi_a - phi_a) / 2 )
__a : Union[str, Any] = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__a : Optional[Any] = sqrt(sin_sq_phi + (cos(_lowerCamelCase ) * cos(_lowerCamelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(_lowerCamelCase )
if __name__ == "__main__":
    # Execute any doctests defined in this module.
    from doctest import testmod

    testmod()
| 63 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
def __magic_name__ ( _lowerCamelCase : Optional[Any] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
lowercase__ = parser.parse_args()
lowercase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def _UpperCamelCase ( _A = 1_0_0_0_0_0_0 , _A = 1_0 ) -> int:
"""simple docstring"""
_UpperCAmelCase = defaultdict(_A )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCAmelCase = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
_UpperCAmelCase = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(_A , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 1_0 )
if __name__ == "__main__":
    # NOTE(review): relies on a module-level name `solution`; in this file the
    # implementation above is bound as `_UpperCamelCase` — confirm a `solution`
    # alias exists before running this as a script.
    print(F"{solution() = }")
"""simple docstring"""
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
if not isinstance(_A , _A ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_UpperCAmelCase = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
    # Execute any doctests defined in this module.
    from doctest import testmod

    testmod()
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( checkpoint_repo: str , pytorch_dump_folder_path: str ):
    """Convert an `efficient_mlm` RoBERTa-PreLayerNorm checkpoint to a
    transformers checkpoint.

    Args:
        checkpoint_repo: Hub repo id of the original PyTorch dump.
        pytorch_dump_folder_path: output directory for the converted model
            and tokenizer.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # `SCREAMING_SNAKE_CASE__` is the conversion function defined above.
    SCREAMING_SNAKE_CASE__(args.checkpoint_repo, args.pytorch_dump_folder_path)
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense per-token-id counts; ids never seen keep count 0.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __UpperCAmelCase( unittest.TestCase ):
    """Integration tests for optimum's BetterTransformer conversion.

    The original file gave both tests the same non-`test_`-prefixed name, so
    the first was shadowed and neither was ever discovered by unittest; they
    are renamed here so both actually run.
    """

    def test_transform_and_reverse(self):
        """Round-trip: to_bettertransformer -> generate -> reverse -> save/reload."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            # Reloaded vanilla model must reproduce the converted model's output.
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """Saving a still-converted model must fail until it is reversed."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 218 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
a : List[str] = TypeVar("""T""")
class __UpperCAmelCase( Generic[T] ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
lowercase__ : Any | T= None
lowercase__ : int= len(snake_case__ )
lowercase__ : list[T]= [any_type for _ in range(self.N )] + arr
lowercase__ : List[str]= fnc
self.build()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for p in range(self.N - 1 , 0 , -1 ):
lowercase__ : Tuple= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
p += self.N
lowercase__ : Dict= v
while p > 1:
lowercase__ : List[str]= p // 2
lowercase__ : Tuple= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): # noqa: E741
'''simple docstring'''
lowercase__, lowercase__ : List[Any]= l + self.N, r + self.N
lowercase__ : T | None= None
while l <= r:
if l % 2 == 1:
lowercase__ : Tuple= self.st[l] if res is None else self.fn(snake_case__ , self.st[l] )
if r % 2 == 0:
lowercase__ : List[str]= self.st[r] if res is None else self.fn(snake_case__ , self.st[r] )
lowercase__, lowercase__ : Optional[int]= (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    # `__UpperCAmelCase` is the SegmentTree class defined above.
    min_segment_tree = __UpperCAmelCase(test_array, min)
    max_segment_tree = __UpperCAmelCase(test_array, max)
    sum_segment_tree = __UpperCAmelCase(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every [i, j] query against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 218 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config URL; consumed by the Auto*
# machinery.  (The original bound the logger and this map to the same name,
# clobbering the logger.)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __snake_case(PretrainedConfig):
    """Configuration class for GPT-NeoX models.

    Stores the hyper-parameters needed to instantiate a GPT-NeoX model; the
    defaults correspond to EleutherAI/gpt-neox-20b.
    """

    # Registry key read by the transformers Auto* machinery; the original
    # source mis-named this attribute, so the config could not be resolved.
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=5_0432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=2_4576,
        hidden_act="gelu",
        rotary_pct=0.2_5,
        rotary_emb_base=1_0000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.0_2,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                """The hidden size is not divisble by the number of attention heads! Make sure to update them!"""
            )

    def _rope_scaling_validation(self):
        """Validate the optional `rope_scaling` dict ({"type": ..., "factor": ...})."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
                f'got {self.rope_scaling}'
            )
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}'
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

# Task name -> Auto model class used to instantiate the backbone.
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __snake_case(pl.LightningModule):
    """Base LightningModule wrapping a transformers model/tokenizer/config.

    Subclasses implement ``get_dataloader`` for their task.  Method names were
    restored to the pytorch-lightning hook names (the mangled duplicates
    shadowed each other and were never called by the trainer).
    """

    def __init__(self, hparams, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs):
        """Initialize and (optionally) load config/tokenizer/model from hparams."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config = config

        # Hyper-parameters that, when set, must be mirrored into the model config.
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f'model config doesn\'t have a `{p}` attribute'
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        """Replace the wrapped model with a freshly loaded checkpoint."""
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        """Build the LR scheduler selected by ``hparams.lr_scheduler``."""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer (AdamW or Adafactor) and step-wise schedule."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        # Tests reuse the validation logic.
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        """Record dataset size (and the train loader) for ``total_steps``."""
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        """Path of the cached-features file for ``mode`` (train/dev/test)."""
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint):
        """Also export the HF model/tokenizer alongside the Lightning checkpoint."""
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Register the model/optimization CLI arguments on ``parser``."""
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    """Initialize the RAG retriever once training starts.

    Renamed from a duplicated mangled class name; the hook name is presumed
    to be `on_sanity_check_start` per the upstream source — confirm against
    the pytorch-lightning version in use.
    """

    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    """Debug callback: print the names of RAG parameters with no gradient.

    Renamed from a duplicated mangled class name; hook name presumed to be
    `on_after_backward` per the upstream source — confirm.
    """

    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    """Log learning rates per step and dump validation/test metrics.

    Renamed from a duplicated mangled class name so all three hooks survive
    (the duplicates shadowed one another).
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current LR of every param group under lr_group_<i>.
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser: argparse.ArgumentParser, root_dir: str) -> None:
    """Add generic (model-agnostic) training CLI arguments to ``parser`` in place.

    Args:
        parser: the argument parser to extend.
        root_dir: path used to anchor the default output/data directories
            (defaults are created next to this path's parent).
    """
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


# Backward-compatible alias for the previous (mangled) name.
_snake_case = add_generic_args
def generic_train(
    model: "BaseTransformer",
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass a WandbLogger instance here; True = default TensorBoard logger
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """Build a pl.Trainer for ``model``, optionally fit it, and return the trainer.

    Args:
        model: the lightning module to train; ``model.hparams.output_dir`` is created.
        args: parsed CLI namespace (see ``add_generic_args``).
        early_stopping_callback: optional callback appended to ``extra_callbacks``.
        logger: logger passed through to ``pl.Trainer``.
        extra_callbacks: additional callbacks (default: none).
        checkpoint_callback: custom checkpoint callback; a val_loss-monitoring
            ModelCheckpoint is created when ``None``.
        logging_callback: custom logging callback; ``LoggingCallback`` when ``None``.
        **extra_train_kwargs: forwarded keyword arguments (unused directly here).

    Returns:
        The constructed ``pl.Trainer`` (fitted when ``args.do_train`` is set).
    """
    pl.seed_everything(args.seed)

    # Avoid a shared mutable default: materialize the callback list per call.
    if extra_callbacks is None:
        extra_callbacks = []

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:  # dest of the ``--fp16`` flag added by add_generic_args
        train_params["precision"] = 16
    if args.gpus > 1:
        # NOTE(review): dict keys restored from the upstream RAG/Ray example
        # ("accelerator"/"strategy"/"profiler"/"devices") — confirm against
        # the pytorch-lightning version actually pinned by this project.
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer


# Backward-compatible alias for the previous (mangled) name.
_snake_case = generic_train
import cmath
import math
def UpperCamelCase(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Return the complex apparent power for voltage/current phasors in polar form.

    Args:
        voltage: voltage magnitude.
        current: current magnitude.
        voltage_angle: voltage phase angle in degrees.
        current_angle: current phase angle in degrees.

    Returns:
        The product of the two rectangular phasors.

    NOTE(review): the textbook definition is S = V * conj(I); this multiplies
    the phasors directly, so ``current_angle`` is interpreted accordingly.

    >>> UpperCamelCase(100, 5, 0, 0)
    (500+0j)
    """
    # Convert the angles from degrees to radians for cmath.rect.
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)

    # Calculate apparent power
    return voltage_rect * current_rect
# Run the module's doctests when the file is executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCamelCase(checkpoint_path, config_path, output_path):
    """Convert an original CompVis latent-diffusion checkpoint to a diffusers LDMPipeline.

    Args:
        checkpoint_path: path to the original ``.ckpt`` file (loaded with torch.load).
        config_path: path to the original OmegaConf YAML config.
        output_path: directory where the converted pipeline is saved.
    """
    # NOTE(review): the top-level ``import OmegaConf`` above looks wrong — the
    # package is normally imported as ``from omegaconf import OmegaConf``; verify.
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE (keys under the "first_stage_model." prefix,
    # with the prefix stripped so they match the diffusers module names)
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)


# The __main__ block below calls ``convert_ldm_original``; expose that name.
convert_ldm_original = UpperCamelCase
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    UpperCamelCase(args.checkpoint_path, args.config_path, args.output_path)
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for XLM (BPE), driven by the shared TokenizerTesterMixin suite."""

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the mixin's temp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        """Return an (input, expected-decoded-output) pair for the mixin's round-trip tests."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenize "lower" and check BPE pieces and ids against the toy vocab."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Check special-token layout ([0] ... [1]) for single and paired sequences."""
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
import sys
from collections import defaultdict
class Heap:
    """Array-backed min-heap with a position index, used by Prim's algorithm.

    ``node_position[v]`` records where vertex ``v`` currently sits inside the
    caller's ``positions`` array, so decrease-key (``bottom_to_top``) can find
    an arbitrary vertex in O(1).
    """

    def __init__(self):
        # node_position[vertex] -> index of that vertex inside ``positions``
        self.node_position = []

    def get_position(self, vertex):
        """Return the current heap index of ``vertex``."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that ``vertex`` now lives at heap index ``pos``."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at index ``start`` down until the min-heap property holds.

        ``heap`` holds keys (distances), ``positions`` the matching vertices;
        both are permuted in lockstep and ``node_position`` is kept in sync.
        """
        if start > size // 2 - 1:
            return  # leaf node, nothing below to compare with
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1  # only a left child exists
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap key and vertex, then fix both vertices' node_position entries.
            temp, temp_pos = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_pos

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Bubble the (decreased) key ``val`` stored at ``index`` up to its place."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                # Shift the parent down one level and keep walking up.
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without finding a smaller parent.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place over ``heap``/``positions``."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop the minimum: return its vertex and sift a sentinel (maxsize) down."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def UpperCAmelCase(adjacency_list):
    """Compute a minimum spanning tree with Prim's algorithm.

    Args:
        adjacency_list: mapping ``vertex -> [[neighbor, weight], ...]`` for an
            undirected weighted graph whose vertices are 0..n-1.

    Returns:
        List of MST edges as ``(parent_vertex, vertex)`` tuples, rooted at vertex 0.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    # Seed distances with vertex 0's neighbors before building the heap.
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    # Decrease-key: tighten the neighbor's distance and re-heapify upward.
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


# The __main__ driver below calls ``prisms_algorithm``; expose that name.
prisms_algorithm = UpperCAmelCase
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Read edges as "u v weight" triples and build an undirected adjacency list.
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(UpperCAmelCase(adjacency_list))
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger and checkpoint->config-URL map. Distinct names fix the
# original shadowing (both constants shared one name, so the map clobbered
# the logger that the DetrConfig class body uses).
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """Configuration class for DETR (DEtection TRansformer) models.

    Stores the encoder/decoder transformer dimensions, the backbone choice
    (timm name or a HF backbone config), and the Hungarian-matcher/loss
    coefficients. Defaults mirror facebook/detr-resnet-50.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                # Module-level logger may be named differently in the mangled file;
                # fetch it directly so this block is self-contained.
                logging.get_logger(__name__).info(
                    "`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone."
                )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a config from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for DETR (image inputs + pixel mask)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the exported graph.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs against PyTorch.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 308 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information-gain) training pairs for the secondary learner.

    Loads a pretrained GPT-2, measures its perplexity on an objective set drawn
    from ``data_file``, then writes context/IG pairs to ``igf_data_file``.
    """
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpta("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    # NOTE(review): argument order restored from the upstream IGF example —
    # confirm against igf.collect_objective_set's signature.
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the IGF secondary learner on collected (context, IG) pairs and return it."""
    set_seed(42)

    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpta,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune GPT-2 with Information Gain Filtration (IGF).

    Contexts sampled from ``train_dataset`` are passed through the secondary
    learner; only contexts whose predicted information gain exceeds
    ``threshold`` contribute to the batched backward pass. The model's test
    perplexity is tracked every ``eval_interval`` optimizer steps, and the
    final weights are saved to ``finetuned_model_name``.

    Returns:
        The fine-tuned model.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            # Sample a random context window out of the example.
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.

                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    """CLI driver: collect IGF pairs, train the secondary learner, then fine-tune GPT-2."""
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpta,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpta,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
# Script entry point.
# NOTE(review): ``main`` must be defined above under exactly this name — in
# the current file the driver function carries a mangled name; confirm.
if __name__ == "__main__":
    main()
| 308 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration file.
# NOTE(review): both constants below share the name ``__SCREAMING_SNAKE_CASE``,
# so the archive map shadows the logger — presumably these were ``logger`` and
# ``NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP``; confirm before relying on either.
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

# Map from checkpoint name to the URL of its hosted config.json.
__SCREAMING_SNAKE_CASE = {
    'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class lowerCAmelCase_ ( __A ):
    """
    Configuration for the NLLB-MoE (mixture-of-experts) encoder/decoder model.

    The defaults reproduce the published configuration of the
    ``facebook/nllb-moe-54b`` checkpoint.  Extra keyword arguments are
    forwarded to the base ``PretrainedConfig``.
    """

    # Names below are the ones the PretrainedConfig machinery looks up
    # (the originals were all collapsed onto the single name `_lowercase`,
    # which left only the last assignment alive).
    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=128_112,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        # The original body assigned every value to a throwaway local
        # (`SCREAMING_SNAKE_CASE_`) instead of an instance attribute, so the
        # config stored nothing; restore the intended `self.` assignments.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # Second read of `encoder_layers` in the original body: the alias
        # expected by downstream code.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 153 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase_ ( unittest.TestCase ):
    """Fast CPU smoke test for the Karras VE unconditional pipeline."""

    @property
    def dummy_uncond_unet(self):
        # Name restored: the test body reads `self.dummy_uncond_unet`; the
        # original defined two methods both called `__lowerCamelCase`, so
        # the property was shadowed and unreachable.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    def test_inference(self):
        """Run two denoising steps and compare the `.images` and tuple outputs."""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='numpy', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration test for Karras VE against a real pretrained checkpoint."""

    def test_full_inference(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 153 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Minimal stand-in so that `Image.open` references below do not fail
        at import time when Pillow is unavailable (vision tests are skipped)."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
    """Pipeline tests for the `object-detection` task."""

    # NOTE(review): attribute name restored to the one the pipeline test
    # harness expects — confirm against the shared test mixin.
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline instance plus example inputs for the common harness."""
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        """Check output schema on single and batched (mixed-type) inputs."""
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| 123 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
lowercase__ : int = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase__):
    """
    Feature extractor that turns raw speech into log-Mel spectral (MFSC)
    frames, with optional per-utterance mean/variance normalisation.

    The original body assigned every configuration value to a throwaway
    local (`snake_case_`) instead of `self`, and reused the parameter name
    `lowercase_` for every argument (a SyntaxError); names are restored
    from their use sites.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # Window/stride sizes in samples (the parameters are in milliseconds).
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract log-Mel (MFSC) features for one waveform; returns (frames, feature_size)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Mean/variance-normalise one feature matrix over its valid frames."""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            # Re-fill the padded tail, which normalisation just disturbed.
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        """Normalise a batch of feature matrices, using the mask (if any) for valid lengths."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurise, pad and (optionally) normalise one utterance or a batch."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            # Only use the mask when real padding was applied.
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 123 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class UpperCamelCase__ ( UpperCAmelCase__):
    """
    Output class for Stable Diffusion pipelines.

    The original annotated both fields with the same name (`__a`), leaving
    the dataclass with a single mangled field; the intended names are
    restored.
    """

    # Denoised images: list of PIL images or a numpy array of shape
    # (batch_size, height, width, num_channels).
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image flag set by the safety checker; None when the checker is disabled.
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class UpperCamelCase__ ( UpperCAmelCase__):
        """
        Output class for the Flax Stable Diffusion pipelines.

        Field names restored: the original annotated both fields as `__a`,
        leaving only one mangled field on the struct.
        """

        # Denoised images as a numpy array (batch_size, height, width, num_channels).
        images: np.ndarray
        # Per-image flag set by the safety checker.
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 433 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """
    Minimal PyTorch Lightning wrapper whose checkpoints contain a Longformer
    encoder plus a 2-label question-answering head.

    Renamed from the obfuscated `UpperCamelCase__`: the conversion function
    below instantiates `LightningModel`, and it reads `lightning_model.model`
    and `lightning_model.qa_outputs`, so the constructor must store these on
    `self` (the original assigned them to throwaway locals).
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # Implemented as a no-op: only the state_dict is used for conversion.
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Convert a Lightning QA checkpoint into a `LongformerForQuestionAnswering` save dir.

    Name restored: the `__main__` block below calls
    `convert_longformer_qa_checkpoint_to_pytorch`, and the original signature
    repeated the parameter name `SCREAMING_SNAKE_CASE` (a SyntaxError).
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('cpu'))
    lightning_model.load_state_dict(ckpt['state_dict'])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
    # Names restored: the original assigned the parser and parsed args to
    # `__snake_case` but then read `parser` / `args`, raising NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--longformer_model',
        default=None,
        type=str,
        required=True,
        help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
    )
    parser.add_argument(
        '--longformer_question_answering_ckpt_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch Lightning Checkpoint.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 433 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffold for the jukebox sub-package.  Restored: the original
# bound the structure dict, the torch-only list AND the lazy module to the
# single name `_UpperCamelCase`, clobbering the dict and leaving
# `_import_structure` (read on the last line) undefined.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is installed: also expose the modelling objects.
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 492 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class UpperCamelCase :
def __init__( self : str , UpperCAmelCase__ : int | None = None ) -> Tuple:
_a : List[str] = value
_a : Node | None = None # Added in order to delete a node easier
_a : Node | None = None
_a : Node | None = None
def __repr__( self : Any ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class UpperCamelCase :
def __init__( self : Optional[Any] , UpperCAmelCase__ : Node | None = None ) -> Any:
_a : Tuple = root
def __str__( self : Any ) -> str:
return str(self.root )
def _lowercase ( self : List[str] , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node | None ) -> None:
if new_children is not None: # reset its kids
_a : Optional[Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(UpperCAmelCase__ ): # If it is the right children
_a : List[Any] = new_children
else:
_a : Tuple = new_children
else:
_a : Any = new_children
def _lowercase ( self : List[str] , UpperCAmelCase__ : Node ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase ( self : str ) -> bool:
return self.root is None
def _lowercase ( self : Tuple , UpperCAmelCase__ : Optional[int] ) -> None:
_a : Tuple = Node(UpperCAmelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
_a : Optional[Any] = new_node # set its root
else: # Tree is not empty
_a : Tuple = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_a : Optional[Any] = new_node # We insert the new node in a leaf
break
else:
_a : Optional[Any] = parent_node.left
else:
if parent_node.right is None:
_a : Union[str, Any] = new_node
break
else:
_a : int = parent_node.right
_a : Any = parent_node
def _lowercase ( self : Optional[Any] , *UpperCAmelCase__ : Optional[Any] ) -> None:
for value in values:
self.__insert(UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[str] ) -> Node | None:
if self.empty():
raise IndexError("""Warning: Tree is empty! please use another.""" )
else:
_a : Any = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_a : int = node.left if value < node.value else node.right
return node
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Node | None = None ) -> Node | None:
if node is None:
if self.root is None:
return None
_a : Optional[Any] = self.root
if not self.empty():
while node.right is not None:
_a : Union[str, Any] = node.right
return node
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Node | None = None ) -> Node | None:
if node is None:
_a : Union[str, Any] = self.root
if self.root is None:
return None
if not self.empty():
_a : Optional[Any] = self.root
while node.left is not None:
_a : List[str] = node.left
return node
def _lowercase ( self : int , UpperCAmelCase__ : int ) -> None:
_a : Tuple = self.search(UpperCAmelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(UpperCAmelCase__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(UpperCAmelCase__ , node.left )
else:
_a : int = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_a : Union[str, Any] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Node | None ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any]=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase ( self : List[str] , UpperCAmelCase__ : list , UpperCAmelCase__ : Node | None ) -> None:
if node:
self.inorder(UpperCAmelCase__ , node.left )
arr.append(node.value )
self.inorder(UpperCAmelCase__ , node.right )
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Node ) -> int:
_a : list[int] = []
self.inorder(UpperCAmelCase__ , UpperCAmelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
def postorder(curr_node: "Node | None") -> "list[Node]":
    """Return the nodes of the subtree rooted at *curr_node* in postorder
    (left subtree, right subtree, then the node itself)."""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree() -> None:
    """Demo: insert a sample sequence, search, report min/max, then empty the tree."""
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module's docstrings.
    doctest.testmod(verbose=True)
| 389 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    """Placeholder object raising a helpful error when torch/torchsde are missing.

    `DummyObject` (imported above) is the metaclass the diffusers dummy-object
    machinery expects; it reads the `_backends` attribute.
    """

    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 574 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive *size*-length tuples from *seq* (the last may be shorter)."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Normalize text for Playfair: uppercase letters only, break double letters
    with 'X', and pad to an even length so it chunks cleanly into digraphs."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:  # split repeated letters with a filler 'X'
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:  # pad odd-length output so every digraph is complete
        clean += "X"

    return clean
def generate_table(key: str) -> list[str]:
    """Build the 25-letter Playfair key table (flattened 5x5) for *key*."""
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table
def encode(plaintext: str, key: str) -> str:
    """Playfair-encode *plaintext* with *key* (digraph substitution cipher)."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:  # same row: shift each letter one column right
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:  # same column: shift each letter one row down
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    """Playfair-decode *ciphertext* with *key* (inverse of `encode`)."""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:  # same row: shift each letter one column left
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:  # same column: shift each letter one row up
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
| 574 | 1 |
def UpperCamelCase(x_points: list, y_points: list, x0: float) -> list:
    """Neville's iterated-interpolation algorithm.

    Evaluates the interpolating polynomial through (x_points[i], y_points[i])
    at *x0*. Returns [interpolated value, full Neville table q].
    (Original signature had three identically-named parameters — a SyntaxError;
    restored here with descriptive names.)
    """
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    # Column 1 holds the base case: the y-values themselves.
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
    import doctest

    # Run the module's doctests.
    doctest.testmod()
| 257 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy-import structure: maps each submodule to the public names it exports.
# NOTE: this name MUST be `_import_structure` — it is passed to `_LazyModule` below.
_import_structure = {}

# The tokenizer is only exported when sentencepiece is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    # Static type checkers see the real import.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 257 | 1 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

# File names the tokenizer expects; referenced by the tokenizer class below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Hub URLs of the pretrained vocabulary/merge/tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum model input lengths (positional-embedding sizes) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class __A (PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) CodeGen tokenizer.

    Byte-level BPE with an extra `truncate_before_pattern` option on `decode`
    for trimming generated code at natural stopping points.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        # Re-create the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer model files; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        truncate_before_pattern=None,
        **kwargs,
    ) -> str:
        """Standard decode, optionally truncated at the first match of any
        regex in *truncate_before_pattern*."""
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """Trim *completion* at the second top-level print/def and at the
        earliest match of any pattern in *truncate_before_pattern*."""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
| 717 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    """Parse CLI args and run the TensorFlow benchmark, translating the
    deprecated `--no_*` flag spelling into a helpful error message."""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval of argparse's error text, kept from upstream.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
| 10 | 0 |
'''simple docstring'''
# Directed test graphs as adjacency lists keyed by vertex.
# test_graph_1's SCCs: {0,1,2}, {3}, {4}; test_graph_2's SCCs: {0,1,2}, {3,4,5}.
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """DFS from *vert*: return reachable vertices in increasing finish order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect all vertices reachable from *vert* in *reversed_graph* (one SCC)."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict) -> list:
    """Kosaraju's algorithm: return the strongly connected components of *graph*.

    Vertices must be labeled 0 .. len(graph) - 1.
    """
    visited = len(graph) * [False]
    # Build the transpose (edge-reversed) graph.
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    # First pass: record finish order on the original graph.
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    # Second pass: explore the transpose in reverse finish order.
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 3 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort *my_list* with bucket sort: one bucket per integer offset from the
    minimum value, each bucket finished with the built-in sort.

    Returns a new sorted list; handles the empty input. (Renamed from the
    mangled `_UpperCAmelCase` — the module's __main__ asserts call `bucket_sort`.)
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
    from doctest import testmod

    # Run doctests, then sanity-check the sort on two sample inputs.
    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 540 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowerCAmelCase__:
    """Weighted graph with a Borůvka minimum-spanning-tree routine.

    Vertices are labeled 0 .. num_of_nodes - 1; component membership is
    tracked union-find style in `m_component`.
    """

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Record the edge (u_node, v_node) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Follow parent pointers to the representative of u_node's component."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Re-point every entry at its representative after a merge."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Union-by-size merge of the components containing u_node and v_node."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute the minimum spanning tree (Borůvka), printing each added edge
        and the final total weight. Assumes the graph is connected."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # For every component, find its cheapest outgoing edge.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add each component's cheapest edge to the MST (if it still merges).
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def __a():
    """No-op placeholder entry point (kept for interface parity)."""
    return None
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module.
    doctest.testmod()
| 340 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    """Builds tiny LED configs and inputs for the TF model tests below.

    (Restored name — the test class instantiates `TFLEDModelTester(self)`.)
    """

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        # Mark the final (eos) position as globally attended.
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Build a full LED input dict, deriving any mask not supplied.

    (Restored name — this helper is called as `prepare_led_inputs_dict`
    elsewhere in this file; `tf.inta` fixed to the real dtype `tf.int8`.)
    """
    if attention_mask is None:
        # Attend to every non-padding token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the first decoder token, then mask decoder padding.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model tests specialized for LED (global attention handling)."""

    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        # Mark the first `num_global_attn_indices` positions as global attention.
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    # NOTE(review): method names of the two stubs below were reconstructed from
    # the upstream LED test module — confirm against the original file.
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking for generation is not covered for LED yet.
        pass
def _long_tensor(tok_lst):
    """Wrap a nested Python int list as an int32 TF tensor.

    (Restored name — callers below use `_long_tensor`; `tf.intaa` fixed to
    the real dtype `tf.int32`.)
    """
    return tf.constant(tok_lst, dtype=tf.int32)
# Absolute tolerance used when comparing model outputs against reference slices.
__SCREAMING_SNAKE_CASE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the pretrained allenai/led-base-16384 checkpoint."""

    def test_inference_no_head(self):
        # `.led` strips the LM head so the output is the base model's hidden states.
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 340 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 178 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Convert a TensorFlow ALBERT checkpoint to a PyTorch state dict on disk.

    The garbled original used one name for all three parameters (a SyntaxError)
    and a name the ``__main__`` call site below does not use.

    Args:
        tf_checkpoint_path: Path to the TF checkpoint to read.
        albert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: Where to write the converted ``state_dict``.
    """
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 178 | 1 |
"""simple docstring"""
from __future__ import annotations
# Standard module metadata. The extraction had collapsed all six values onto a
# single rebound name; restore the conventional dunder attributes.
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class UpperCAmelCase(HTMLParser):
    """HTML parser that collects the URLs of all anchor tags on a page.

    Fixes: the original base class was an undefined garbled name (``HTMLParser``
    is the only sensible base given ``super().__init__()`` and the callback
    signature), and the callback must be named ``handle_starttag`` for
    ``HTMLParser`` to invoke it.
    """

    def __init__(self, domain):
        super().__init__()
        # URLs discovered so far (absolute, joined against ``domain``).
        self.urls = []
        self.domain = domain

    def handle_starttag(self, tag, attrs):
        """Record the href of every non-empty, non-fragment anchor tag."""
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# The rest of this module refers to the parser by this name (see the scraper below).
Parser = UpperCAmelCase
def get_sub_domain_name(url: str) -> str:
    """Return the network location (e.g. ``www.github.com``) of *url*."""
    return parse.urlparse(url).netloc


def get_domain_name(url: str) -> str:
    """Return the registrable domain (last two dot-separated labels) of *url*."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Scrape *url*, follow every link found, and return a sorted list of
    e-mail addresses on the target domain.

    The extraction collapsed three distinct functions onto one name while the
    bodies still call ``get_sub_domain_name``/``get_domain_name`` — restored here.
    """
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


# Backward-compatible alias: the garbled module's final binding of
# ``snake_case`` was the scraper itself.
snake_case = emails_from_url
if __name__ == "__main__":
snake_case = emails_from_url('''https://github.com''')
print(F"{len(emails)} emails found:")
print('''\n'''.join(sorted(emails)))
| 704 |
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial-division primality test (treats 0 and 1 as prime-free edge cases
    the caller never produces — candidates start at 7)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below *max_prime* expressible as a difference of two
    consecutive cubes: (n+1)^3 - n^3 = 3n^2 + 3n + 1, generated incrementally
    via the step 6 * cube_index.

    The garbled original collapsed all three loop variables onto one name and
    tested primality of the bound instead of the candidate.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2^3 - 1^3, the first cube difference
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


# Backward-compatible alias: the garbled module's final binding of ``snake_case``.
snake_case = solution


if __name__ == "__main__":
    print(f"{solution() = }")
| 404 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """Build an operation tuple: look up key *k* via ``operator.getitem``."""
    return getitem, k


def _set(k, v):
    """Build an operation tuple: assign ``obj[k] = v`` via ``operator.setitem``."""
    return setitem, k, v


def _del(k):
    """Build an operation tuple: delete key *k* via ``operator.delitem``."""
    return delitem, k


def _run_operation(obj, fun, *args):
    """Apply ``fun(obj, *args)``; return ``(result, None)`` or ``(None, exception)``.

    The garbled originals shared one function name, used one name for every
    parameter (a SyntaxError), and the call sites below require exactly
    ``_get``/``_set``/``_del``/``_run_operation``.
    """
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
# Operation scripts replayed against both HashMap and dict by the parametrized
# test below; the parametrize decorator references these exact names.
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    """Replay *operations* on a HashMap and a plain dict; both must agree on
    results, repr, keys, length and items after every step.

    The garbled original fed its own parameter to ``_run_operation`` three
    times instead of the target object, the operation and its arguments.
    """
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    """HashMap must not expose public names beyond dict's public API.

    Fixes the garbled inner predicate, which took one parameter name but read
    another (undefined) one, and restores a pytest-discoverable ``test_`` name.
    """

    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 692 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowercase(nn.Module):
    r"""
    Cross-attention 2D downsampling block: ``num_layers`` pairs of
    (ResNet block, transformer block), optionally followed by a downsampler.

    NOTE(review): the extraction replaced the flax dataclass fields with
    placeholder assignments and renamed ``setup``; field names/order are
    restored from the attributes the methods below actually read.
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32  # the garbled ``jnp.floataa`` does not exist

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            # First resnet maps in_channels -> out_channels; the rest keep out_channels.
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class lowercase(nn.Module):
    r"""
    Plain 2D downsampling block: ``num_layers`` ResNet blocks, optionally
    followed by a downsampler. Field names restored from the attributes the
    methods below read (the extraction had replaced them with placeholders).
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32  # the garbled ``jnp.floataa`` does not exist

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            # First resnet maps in_channels -> out_channels; the rest keep out_channels.
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class lowercase(nn.Module):
    r"""
    Cross-attention 2D upsampling block: each of the ``num_layers`` steps pops
    one skip connection, concatenates it on the channel axis, then applies a
    ResNet block and a transformer block; an optional upsampler finishes.
    Field names restored from the attributes the methods below read.
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32  # the garbled ``jnp.floataa`` does not exist

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)

        return hidden_states
class lowercase(nn.Module):
    r"""
    Plain 2D upsampling block: each of the ``num_layers`` steps pops one skip
    connection, concatenates it on the channel axis and applies a ResNet block;
    an optional upsampler finishes. Field names restored from the attributes
    the methods below read.
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32  # the garbled ``jnp.floataa`` does not exist

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)

        return hidden_states
class lowercase(nn.Module):
    r"""
    2D mid block with cross attention: a leading ResNet block, then
    ``num_layers`` (transformer block, ResNet block) pairs. Field names
    restored from the attributes the methods below read.
    """

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32  # the garbled ``jnp.floataa`` does not exist

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
| 704 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf config from *config_path*; optionally dump it as YAML.

    The garbled original used one name for both parameters (a SyntaxError)
    and a name the call site below (``load_config``) does not match.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from *conf_path* and load weights from *ckpt_path*.

    Args:
        device: torch device (also used as ``map_location`` for the checkpoint).
        conf_path: YAML config path; defaults to the bundled vqgan-only config.
        ckpt_path: checkpoint path (``.pt`` or Lightning ``.ckpt``).
    """
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints nest the weights under "state_dict".
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    """Encode *x* with the VQGAN and decode the latent back to image space."""
    # encode() returns (z, quantization loss, info); only the latent is needed.
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``"pkg.mod.Cls"`` to the object it names.

    Args:
        string: fully qualified name, ``module.attr``.
        reload: when True, re-import the module before resolving.
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """Instantiate ``config["target"]`` with ``config["params"]`` as kwargs.

    Raises:
        KeyError: if the config has no ``target`` entry.
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from *config*, optionally load *sd* and move/eval it.

    Returns:
        dict with a single ``"model"`` entry (the call site below unwraps it).
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """Load a full model from *config* plus an optional Lightning checkpoint.

    Returns:
        (model, global_step) — global_step is None when no checkpoint is given.
    """
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 393 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class a(TaskTemplate):
    """Extractive question-answering task template.

    Fixes: ``frozen`` and the base class were undefined garbled names
    (``TaskTemplate`` is the imported base), and every field shared the name
    ``a`` while the mapping property reads ``question_column`` etc.
    """

    # `task` keeps its default in asdict() output so the template is identifiable.
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Maps the dataset's configured column names onto the canonical template names.
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 63 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including *num* via the sieve of Eratosthenes.

    Fixes: the garbled original read an undefined ``num``, never wrote back to
    the sieve, and used the wrong step when striking out multiples; the
    ``__main__`` block below calls it as ``prime_sieve_eratosthenes``.

    Raises:
        ValueError: if *num* is not a positive integer.
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # Strike out every multiple of p starting at p*p.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Any = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 63 | 1 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics ``@property`` but caches the computed value on the
    instance (under ``__cached_<name>``) so the getter runs only once.

    Fixes: the garbled base class was undefined (``property`` supplies the
    ``fget`` attribute the body reads) and ``__get__`` used one name for two
    parameters — a SyntaxError.
    """

    def __get__(self, obj, objtype=None):
        # Accessed on the class itself: return the descriptor.
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a truthy/falsy string to 1/0 (``distutils.util.strtobool`` semantics).

    Fixes: the garbled original lowered the input into a throwaway name and
    then tested the *original* string, so e.g. ``"YES"`` raised.

    Raises:
        ValueError: if *val* is not a recognised truth value.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """True if *x* is a torch/TF/JAX tensor (or fx proxy / tracer) or a numpy array.

    Framework imports are deferred behind availability checks so this is safe
    to call when a framework is not installed. (Name restored: the garbled
    original collapsed every function here onto one identifier, while
    ``ModelOutput.__post_init__`` below calls ``is_tensor``.)
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests whether *x* is a numpy array."""
    return _is_numpy(x)
def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests whether *x* is a torch tensor; safe to call without torch installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests whether *x* is a ``torch.device``; safe without torch installed."""
    return False if not is_torch_available() else _is_torch_device(x)
def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        # Accept dtype names like "float32" by resolving them on the torch module.
        # (The garbled original tested isinstance(x, x) and hasattr(x, x).)
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests whether *x* is a torch dtype (or its string name); safe without torch."""
    return False if not is_torch_available() else _is_torch_dtype(x)
def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests whether *x* is a TF tensor; safe to call without TF installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests whether *x* is a symbolic TF tensor; safe without TF installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)
def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests whether *x* is a JAX array; safe to call without flax/jax installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Recursively convert tensors (TF/torch/JAX/numpy) and nested containers
    to plain Python objects (lists, dicts, scalars)."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj):
    """Recursively convert tensors (TF/torch/JAX) and nested containers to numpy."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """Base class for model outputs: an ordered mapping of the dataclass fields
    that are not ``None``, also indexable like a tuple.

    Fixes: the garbled base class was undefined (``OrderedDict`` supplies the
    mapping behaviour the methods rely on), several methods used one name for
    ``*args``/``**kwargs`` (a SyntaxError), and assignments into ``self[...]``
    had been collapsed into throwaway locals.
    """

    def __post_init__(self):
        """Populate the mapping from the subclass's dataclass fields."""
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            # Integer/slice access goes through the tuple view.
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Return a tuple of all non-``None`` values, in field order."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """String-valued Enum with a more explicit error for missing values.

    Fixes: the two garbled base-class names were undefined (``str, Enum`` is
    what makes ``_missing_`` and string members meaningful), and
    ``_value2member_map_`` had been garbled.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )
class PaddingStrategy(ExplicitEnum):
    """Possible values for a ``padding`` argument (tab-completable in an IDE).

    Fixes: member names had been garbled into throwaway variable names and the
    base class was undefined (``ExplicitEnum`` is defined just above).
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"
class TensorType(ExplicitEnum):
    """Possible values for a ``return_tensors`` argument (tab-completable).

    Fixes: member names had been garbled into throwaway variable names and the
    base class was undefined (``ExplicitEnum`` is defined just above).
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper that enters a list of context managers together (via ExitStack)
    and exits them in reverse order.

    Fixes: ``__exit__`` used one name for both ``*args`` and ``**kwargs`` —
    a SyntaxError.
    """

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def __UpperCAmelCase(model_class):
    """Return True if ``model_class``'s call signature defaults ``return_loss`` to True.

    Inspects ``call`` (TF), ``forward`` (PT) or ``__call__`` (Flax) depending on
    the framework inferred from the class's MRO.
    """
    # Bug fix: locals were assigned to a throwaway name while the code below
    # read ``framework`` and ``signature`` (NameError). Restored the bindings.
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def __UpperCAmelCase(model_class):
    """Return the label-argument names of ``model_class``'s call signature.

    QA models additionally accept ``start_positions``/``end_positions``.
    """
    # Bug fix: locals were assigned to a throwaway name while the code below
    # read ``model_name`` and ``signature`` (NameError). Restored the bindings.
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def __UpperCAmelCase(d, parent_key="", delimiter="."):
    """Flatten a nested mapping into one level, joining keys with ``delimiter``.

    Bug fixes: the original signature reused one parameter name three times
    (SyntaxError), yielded an undefined ``key``, tested ``isinstance(v, v)``,
    and recursed into an undefined name.
    """
    from collections.abc import MutableMapping  # local: keeps the block self-contained

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                # Recurse into the generator directly; empty sub-dicts are kept
                # as leaf values (``v`` is falsy).
                yield from _flatten_dict(v, key, delimiter)
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def __UpperCAmelCase(working_dir, use_temp_dir=False):
    """Yield ``working_dir``, or a fresh temporary directory when ``use_temp_dir``.

    Bug fix: both parameters were named ``__lowerCamelCase`` (SyntaxError) while
    the body read ``use_temp_dir`` and ``working_dir``.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def __UpperCAmelCase(array, axes=None):
    """Framework-agnostic transpose for numpy/torch/TF/JAX arrays.

    Bug fix: both parameters were named ``__lowerCamelCase`` (SyntaxError)
    while the body read ``array`` and ``axes``.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")
def __UpperCAmelCase(array, newshape):
    """Framework-agnostic reshape for numpy/torch/TF/JAX arrays.

    Bug fix: both parameters were named ``__lowerCamelCase`` (SyntaxError)
    while the body read ``array``.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")
def __UpperCAmelCase(array, axis=None):
    """Framework-agnostic squeeze for numpy/torch/TF/JAX arrays.

    Bug fix: both parameters were named ``__lowerCamelCase`` (SyntaxError)
    while the body read ``array`` and ``axis``.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")
def __UpperCAmelCase(array, axis):
    """Framework-agnostic expand_dims for numpy/torch/TF/JAX arrays.

    Bug fix: both parameters were named ``__lowerCamelCase`` (SyntaxError)
    while the body read ``array`` and ``axis``.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")
def __UpperCAmelCase(array):
    """Return the number of elements in a numpy/torch/TF/JAX array.

    Bug fix: the parameter was named ``__lowerCamelCase`` while the torch/JAX
    branches read ``array`` (NameError).
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        # Bug fix: the error message previously said "expand_dims" (copy-paste
        # from the sibling helper); name the operation that actually failed.
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def __UpperCAmelCase(auto_map, repo_id):
    """Prefix bare class references in ``auto_map`` with ``repo_id``, in place.

    Values already carrying a ``--`` repo prefix (or ``None``) are left alone;
    list/tuple values are mapped element-wise. Returns the same dict.

    Bug fixes: both parameters were named ``__lowerCamelCase`` (SyntaxError)
    and the store-back into ``auto_map[key]`` was replaced by a dead local.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
def infer_framework(model_class):
    """Infer "tf", "pt" or "flax" from a model class's MRO.

    Renamed from the obfuscated ``__UpperCAmelCase``: the callers in this file
    invoke ``infer_framework(...)`` by name, which was otherwise a NameError.
    Also restored the ``module``/``name`` locals the conditions read, and the
    ``for``/``else`` shape so TypeError is raised only after the whole MRO has
    been scanned.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 122 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A_)
class __A(A_):
    """Language-modeling task template: maps a dataset's text column to the
    canonical ``"text"`` feature."""

    # Bug fix: all four fields were named ``lowerCAmelCase`` (later assignments
    # shadow earlier ones) while the property below reads ``self.text_column``
    # (AttributeError). Restored the upstream field names.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def UpperCAmelCase(self) -> Dict[str, str]:
        """Mapping from the configured text column to the standard "text" key."""
        return {self.text_column: "text"}
| 122 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__)
def get_detr_config(model_name: str):
    """Build a ``DetrConfig`` for ``model_name`` and report whether it is panoptic.

    Returns:
        (config, is_panoptic): the populated config and a bool flag.

    Renamed from the obfuscated ``A``: the conversion entry point calls
    ``get_detr_config(...)`` by name. Destroyed local bindings were restored.
    """
    # pick the ResNet backbone matching the checkpoint name
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    # use_timm_backbone=False: rely on the HF ResNet implementation built above
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    config.num_labels = 250 if is_panoptic else 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # Bug fix: the original converted ``int(model_name)`` for every key; the
    # JSON's own string ids must be converted instead.
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    """Return (original_name, hf_name) pairs for renaming DETR state-dict keys.

    Covers the ResNet stem and stages of the backbone, every encoder/decoder
    transformer layer, and the projection/query/head weights.

    Renamed from the obfuscated ``A`` (the entry point calls
    ``create_rename_keys``); the ``rename_keys`` list binding was restored.
    """
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
    rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
    rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
    rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # shortcut (only the first block of each stage has a downsample path)
            if layer_idx == 0:
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var"))
            # 3 convs
            for i in range(3):
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean"))
                rename_keys.append((f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var"))
    # fmt: on

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )

    return rename_keys
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    Bug fixes: all three parameters shared one name (SyntaxError); the
    store-back under the new key had been replaced by a dead local; and the
    function was named ``A`` while callers invoke ``rename_key``.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split fused q/k/v ``in_proj`` weights into separate projections, in place.

    Bug fixes: both parameters shared one name (SyntaxError); every store into
    ``state_dict`` had been replaced by a dead local; and the function was
    named ``A`` while the entry point calls ``read_in_q_k_v``.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download the standard COCO validation image used as a conversion smoke test.

    Renamed from the obfuscated ``A``: the entry point calls ``prepare_img``.
    Restored ``stream=True`` so the response body can be read as a file object.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original facebookresearch DETR checkpoint to the HF format.

    Args:
        model_name: one of ``detr-resnet-50`` / ``detr-resnet-101``.
        pytorch_dump_folder_path: if set, where to save model + processor.
        push_to_hub: if True, upload both to the hub under ``nielsr/``.

    Bug fixes: all three parameters shared one name (SyntaxError); the function
    was named ``A`` while ``__main__`` calls ``convert_detr_checkpoint``; and
    every store-back into ``state_dict`` had been replaced by a dead local.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    annotation_format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=annotation_format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # CLI entry point for the DETR conversion script.
    # Bug fix: the parser/args were assigned to ``UpperCAmelCase__`` while the
    # lines below read ``parser`` and ``args`` (NameError).
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 48 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Convert a TensorFlow BigBird checkpoint to a saved PyTorch model.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        big_bird_config_file: JSON config describing the architecture.
        pytorch_dump_path: output directory for the PyTorch model.
        is_trivia_qa: build a QA head instead of the pretraining head.

    Bug fixes: all four parameters shared the name ``UpperCamelCase_``
    (SyntaxError), and the function was named ``A`` while ``__main__`` calls
    ``convert_tf_checkpoint_to_pytorch``.
    """
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point for the BigBird conversion script.
    # Bug fix: parser/args were assigned to ``UpperCAmelCase__`` while the
    # lines below read ``parser`` and ``args`` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
| 48 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# Bug fix: this set was bound to ``snake_case_`` while the test class below
# reads ``_TO_SKIP`` (NameError).
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Tests for ``TextClassificationPipeline`` across PT/TF, tiny and real models.

    Bug fixes applied throughout: pipeline/output locals were assigned to
    ``__snake_case`` while the assertions read ``text_classifier``/``outputs``
    (NameError); ``get_test_pipeline``/``run_pipeline_test`` reused the name
    ``a__`` for several parameters (SyntaxError); ``idalabel`` is a typo for
    ``id2label``; and all methods were collapsed to one name ``a`` so only the
    last definition survived — restored distinct, discoverable method names.
    """

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 717 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Test helper that builds Swinv2 configs and dummy pixel inputs for the suite below.

    NOTE(review): the identifier obfuscation left this class broken.  Every
    multi-argument signature repeats the same parameter name (``a__``), which is a
    SyntaxError in Python ("duplicate argument in function definition"), and
    ``__init__`` binds each incoming value to a throwaway local (``__snake_case``)
    instead of the ``self.*`` attributes that the other methods read
    (``self.batch_size`` etc.).  The intended parameter names can be read off the
    assignment order in ``__init__`` -- restore them before running.
    """
    def __init__(self : List[Any] , a__ : Union[str, Any] , a__ : Any=13 , a__ : List[str]=32 , a__ : Union[str, Any]=2 , a__ : int=3 , a__ : Tuple=16 , a__ : Dict=[1, 2, 1] , a__ : Any=[2, 2, 4] , a__ : Optional[int]=2 , a__ : List[Any]=2.0 , a__ : Any=True , a__ : Optional[Any]=0.0 , a__ : Union[str, Any]=0.0 , a__ : Optional[Any]=0.1 , a__ : Any="gelu" , a__ : Optional[int]=False , a__ : Union[str, Any]=True , a__ : List[Any]=0.0_2 , a__ : int=1E-5 , a__ : int=True , a__ : str=None , a__ : Union[str, Any]=True , a__ : Any=10 , a__ : str=8 , ):
        """Record the model/test hyperparameters on the tester instance.

        NOTE(review): the RHS names below (``parent``, ``batch_size``, ...) are the
        original parameter names; they are currently undefined because the
        signature collapsed them all to ``a__``.
        """
        __snake_case = parent
        __snake_case = batch_size
        __snake_case = image_size
        __snake_case = patch_size
        __snake_case = num_channels
        __snake_case = embed_dim
        __snake_case = depths
        __snake_case = num_heads
        __snake_case = window_size
        __snake_case = mlp_ratio
        __snake_case = qkv_bias
        __snake_case = hidden_dropout_prob
        __snake_case = attention_probs_dropout_prob
        __snake_case = drop_path_rate
        __snake_case = hidden_act
        __snake_case = use_absolute_embeddings
        __snake_case = patch_norm
        __snake_case = layer_norm_eps
        __snake_case = initializer_range
        __snake_case = is_training
        __snake_case = scope
        __snake_case = use_labels
        __snake_case = type_sequence_label_size
        __snake_case = encoder_stride
    def a (self : List[Any] ):
        """Build a (config, pixel_values, labels) tuple of dummy inputs."""
        __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __snake_case = None
        if self.use_labels:
            # labels are only materialized when the tester is configured to use them
            __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __snake_case = self.get_config()
        return config, pixel_values, labels
    def a (self : Dict ):
        """Instantiate a SwinvaConfig from the stored hyperparameters."""
        # NOTE(review): `path_norm=` (not `patch_norm=`) is carried over from the
        # upstream file -- confirm against SwinvaConfig's accepted kwargs.
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def a (self : Union[str, Any] , a__ : List[Any] , a__ : str , a__ : Optional[int] ):
        """Run the bare SwinvaModel and check the last hidden state shape."""
        __snake_case = SwinvaModel(config=a__ )
        model.to(a__ )
        model.eval()
        __snake_case = model(a__ )
        # sequence length shrinks 4x per merge stage; width doubles per stage
        __snake_case = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        __snake_case = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def a (self : str , a__ : int , a__ : Tuple , a__ : Union[str, Any] ):
        """Run SwinvaForMaskedImageModeling and verify reconstruction logits shapes."""
        __snake_case = SwinvaForMaskedImageModeling(config=a__ )
        model.to(a__ )
        model.eval()
        __snake_case = model(a__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        __snake_case = 1
        __snake_case = SwinvaForMaskedImageModeling(a__ )
        model.to(a__ )
        model.eval()
        __snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __snake_case = model(a__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def a (self : Any , a__ : int , a__ : Dict , a__ : Tuple ):
        """Run SwinvaForImageClassification with labels and verify logits shape."""
        __snake_case = self.type_sequence_label_size
        __snake_case = SwinvaForImageClassification(a__ )
        model.to(a__ )
        model.eval()
        __snake_case = model(a__ , labels=a__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def a (self : Union[str, Any] ):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        __snake_case = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case = config_and_inputs
        __snake_case = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """Swinv2 model test suite (common model tests + pipeline tests).

    NOTE(review): obfuscation damage -- the mixin bases were renamed to the
    undefined ``_UpperCAmelCase``; every test method is named ``a`` so later
    definitions shadow earlier ones; several signatures repeat the parameter name
    ``a__`` (a SyntaxError); and the ``A_`` class attributes lost their original
    names (presumably ``all_model_classes``, ``pipeline_model_mapping`` and the
    ``fx_compatible``/``test_*`` flags -- confirm against the mixin API).
    """
    A_ : Dict = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    A_ : int = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    A_ : Dict = False
    A_ : Dict = False
    A_ : Dict = False
    A_ : List[Any] = False
    def a (self : Optional[int] ):
        """Set up the model tester and the config tester used by the suite."""
        # NOTE(review): `a__` is undefined here -- `config_class` was presumably
        # SwinvaConfig, and SwinvaModelTester is the (renamed) helper class above.
        __snake_case = SwinvaModelTester(self )
        __snake_case = ConfigTester(self , config_class=a__ , embed_dim=37 )
    def a (self : Optional[int] ):
        """Run the common SwinvaConfig serialization/init checks."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def a (self : Any ):
        """Smoke-test the bare model through the shared model tester."""
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a__ )
    @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
    def a (self : str ):
        """Intentionally skipped (see decorator reason)."""
        pass
    @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
    def a (self : str ):
        """Intentionally skipped (see decorator reason)."""
        pass
    def a (self : Any ):
        """Input embeddings must be a Module; output embeddings absent or Linear."""
        __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case = model_class(a__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __snake_case = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
    def a (self : List[Any] ):
        """The forward signature of every model must start with `pixel_values`."""
        __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case = model_class(a__ )
            __snake_case = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case = [*signature.parameters.keys()]
            __snake_case = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , a__ )
    def a (self : str ):
        """Exercise attention outputs: count, window-level shapes, and ordering."""
        __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case = True
        for model_class in self.all_model_classes:
            __snake_case = True
            __snake_case = False
            __snake_case = True
            __snake_case = model_class(a__ )
            model.to(a__ )
            model.eval()
            with torch.no_grad():
                __snake_case = model(**self._prepare_for_class(a__ , a__ ) )
            __snake_case = outputs.attentions
            # one attention tensor per stage
            __snake_case = len(self.model_tester.depths )
            self.assertEqual(len(a__ ) , a__ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __snake_case = True
            # attention is computed over window_size**2 positions
            __snake_case = config.window_size**2
            __snake_case = model_class(a__ )
            model.to(a__ )
            model.eval()
            with torch.no_grad():
                __snake_case = model(**self._prepare_for_class(a__ , a__ ) )
            __snake_case = outputs.attentions
            self.assertEqual(len(a__ ) , a__ )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            __snake_case = len(a__ )
            # Check attention is always last and order is fine
            __snake_case = True
            __snake_case = True
            __snake_case = model_class(a__ )
            model.to(a__ )
            model.eval()
            with torch.no_grad():
                __snake_case = model(**self._prepare_for_class(a__ , a__ ) )
            if hasattr(self.model_tester , '''num_hidden_states_types''' ):
                __snake_case = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                __snake_case = 2
            self.assertEqual(out_len + added_hidden_states , len(a__ ) )
            __snake_case = outputs.attentions
            self.assertEqual(len(a__ ) , a__ )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def a (self : Dict , a__ : int , a__ : List[str] , a__ : List[Any] , a__ : Dict ):
        """Helper: verify hidden_states and reshaped_hidden_states for one model class."""
        __snake_case = model_class(a__ )
        model.to(a__ )
        model.eval()
        with torch.no_grad():
            __snake_case = model(**self._prepare_for_class(a__ , a__ ) )
        __snake_case = outputs.hidden_states
        __snake_case = getattr(
            self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(a__ ) , a__ )
        # Swinv2 has a different seq_length
        __snake_case = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        __snake_case = outputs.reshaped_hidden_states
        self.assertEqual(len(a__ ) , a__ )
        __snake_case , __snake_case , __snake_case , __snake_case = reshaped_hidden_states[0].shape
        # flatten (B, C, H, W) back to (B, H*W, C) and compare against hidden_states
        __snake_case = (
            reshaped_hidden_states[0].view(a__ , a__ , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def a (self : Optional[Any] ):
        """Hidden-states outputs, requested both via kwargs and via config."""
        __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            __snake_case = True
            self.check_hidden_states_output(a__ , a__ , a__ , a__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case = True
            self.check_hidden_states_output(a__ , a__ , a__ , a__ )
    def a (self : int ):
        """Hidden-states outputs when the input size is not a multiple of patch size."""
        __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case = 3
        __snake_case = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __snake_case = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # round the spatial dims up to the next multiple of the patch size
        __snake_case = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __snake_case = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            __snake_case = True
            self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case = True
            self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
    def a (self : Optional[int] ):
        """Smoke-test the masked-image-modeling head."""
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*a__ )
    def a (self : Dict ):
        """Smoke-test the image-classification head."""
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a__ )
    @slow
    def a (self : List[str] ):
        """Load the first published checkpoint end-to-end."""
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case = SwinvaModel.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
    def a (self : Dict ):
        """With initializer_range zeroed, all non-embedding params must be 0 or 1."""
        __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case = _config_zero_init(a__ )
        for model_class in self.all_model_classes:
            __snake_case = model_class(config=a__ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test: run the tiny Swinv2 checkpoint on a fixture image.

    NOTE(review): both methods are named ``a``, so the second definition shadows
    the first (the cached_property was presumably ``default_image_processor``,
    which the test body still reads), and the bodies read an undefined ``a__``
    where the obfuscation dropped the original argument names.
    """
    @cached_property
    def a (self : Dict ):
        """Image processor for the tiny checkpoint, or None without vision deps."""
        return (
            AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
            if is_vision_available()
            else None
        )
    @slow
    def a (self : Dict ):
        """Classify the COCO fixture image and check logits shape and values."""
        __snake_case = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
            a__ )
        __snake_case = self.default_image_processor
        __snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        __snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
        # forward pass
        with torch.no_grad():
            __snake_case = model(**a__ )
        # verify the logits
        __snake_case = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , a__ )
        __snake_case = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
| 388 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
# Emit INFO-level logs during the conversion run.
logging.set_verbosity_info()
# NOTE(review): the two assignments below bind the SAME name, so the first is
# immediately clobbered.  They were presumably `logger` (read by the save/push
# steps below) and `EXPECTED_MISSING_KEYS` (read by the state-dict loading
# check) -- restore distinct names before running.
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = ['''model.decoder.embed_positions.weights''']
def __UpperCAmelCase ( name : str ) -> str:
    """Map one fairseq MusicGen decoder weight name onto the Transformers layout.

    The replacements are applied in sequence (so ``norm1`` is handled before
    ``norm_cross`` and ``norm2``); a name matching none of the patterns is
    returned unchanged.

    Fix: the obfuscated original accepted ``snake_case_`` but read an undefined
    ``name`` and discarded every ``str.replace`` result into a throwaway local,
    so it always raised NameError.  Parameter and results now share ``name``.
    """
    replacements = (
        ("emb", "model.decoder.embed_tokens"),
        ("transformer", "model.decoder"),
        ("cross_attention", "encoder_attn"),
        ("linear1", "fc1"),
        ("linear2", "fc2"),
        ("norm1", "self_attn_layer_norm"),
        ("norm_cross", "encoder_attn_layer_norm"),
        ("norm2", "final_layer_norm"),
        ("out_norm", "model.decoder.layer_norm"),
        ("linears", "lm_heads"),
        ("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj"),
    )
    for old, new in replacements:
        if old in name:
            name = name.replace(old, new)
    return name
def __UpperCAmelCase ( state_dict : OrderedDict , hidden_size : int ) -> Tuple[Dict, Dict]:
    """Rename a fairseq decoder state dict in place and split fused projections.

    Returns ``(state_dict, enc_dec_proj_state_dict)``: the renamed decoder
    weights, and the encoder->decoder projection weights pulled out separately
    (with their ``enc_to_dec_proj.`` prefix stripped so they can be loaded
    straight into that submodule -- see the call to
    ``model.enc_to_dec_proj.load_state_dict`` below).

    Fix: the obfuscated original declared the parameter name twice (a
    SyntaxError) and dropped every assignment target; the targets are
    reconstructed from the return statement and the surrounding usage.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into the separate q/k/v projection weights
            # (names follow the MusicGen attention modules -- TODO confirm)
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            # collect the projection separately, relative to its submodule
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def __UpperCAmelCase ( checkpoint : str ) -> "MusicgenDecoderConfig":
    """Return the MusicgenDecoderConfig for a pretrained checkpoint size.

    Args:
        checkpoint: one of ``"small"``, ``"medium"`` or ``"large"``.

    Raises:
        ValueError: for any other checkpoint name.

    Fix: the obfuscated original read an undefined ``checkpoint`` and dropped
    the per-size constants into a throwaway local; the hyperparameter names are
    restored from the config kwargs below.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,  # feed-forward width is 4x the model width
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : int=None , snake_case_ : Union[str, Any]=None , snake_case_ : Optional[Any]="cpu" ) -> Any:
    """Convert a fairseq MusicGen checkpoint into a Transformers model + processor.

    Loads the fairseq model, remaps its decoder weights, assembles the composite
    MusicgenForConditionalGeneration, sanity-checks a forward pass, builds the
    processor, then optionally saves to disk and/or pushes to the Hub.

    NOTE(review): the signature repeats ``snake_case_`` four times, which is a
    SyntaxError; from the argparse block below the parameters were presumably
    ``checkpoint``, ``pytorch_dump_folder``, ``repo_id`` and ``device``.  The
    body also assigns every result to a throwaway ``_lowerCAmelCase`` while later
    lines read the original local names (``fairseq_model``, ``decoder_config``,
    ``text_encoder``, ``audio_encoder``, ``decoder``, ``model``, ``processor``,
    ``logits`` ...) -- restore those bindings before running.
    """
    _lowerCAmelCase = MusicGen.get_pretrained(snake_case_ , device=snake_case_ )
    _lowerCAmelCase = decoder_config_from_checkpoint(snake_case_ )
    _lowerCAmelCase = fairseq_model.lm.state_dict()
    _lowerCAmelCase , _lowerCAmelCase = rename_state_dict(
        snake_case_ , hidden_size=decoder_config.hidden_size )
    _lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" )
    _lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
    _lowerCAmelCase = MusicgenForCausalLM(snake_case_ ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    _lowerCAmelCase , _lowerCAmelCase = decoder.load_state_dict(snake_case_ , strict=snake_case_ )
    for key in missing_keys.copy():
        if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(snake_case_ )
    if len(snake_case_ ) > 0:
        raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(snake_case_ ) > 0:
        raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    _lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=snake_case_ , audio_encoder=snake_case_ , decoder=snake_case_ )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(snake_case_ )
    # check we can do a forward pass
    _lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    _lowerCAmelCase = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        _lowerCAmelCase = model(input_ids=snake_case_ , decoder_input_ids=snake_case_ ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("""Incorrect shape for logits""" )
    # now construct the processor
    _lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" )
    _lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
    _lowerCAmelCase = MusicgenProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ )
    # set the appropriate bos/pad token ids
    _lowerCAmelCase = 2048
    _lowerCAmelCase = 2048
    # set other default generation config params
    _lowerCAmelCase = int(30 * audio_encoder.config.frame_rate )
    _lowerCAmelCase = True
    _lowerCAmelCase = 3.0
    if pytorch_dump_folder is not None:
        Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
        logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(snake_case_ )
        processor.save_pretrained(snake_case_ )
    if repo_id:
        logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(snake_case_ )
        processor.push_to_hub(snake_case_ )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to `SCREAMING_SNAKE_CASE` but the
    # `add_argument` calls below read `parser`, and the final call targets
    # `convert_musicgen_checkpoint`, which is not defined under that name in
    # this file -- obfuscation artifacts to restore before running.
    SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint''',
        default='''small''',
        type=str,
        help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder''',
        required=True,
        default=None,
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )
    parser.add_argument(
        '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
    )
    SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# Type aliases for dataclass instances and dataclass classes used throughout the
# parser.  NOTE(review): both assignments bind the SAME name, so the first alias
# is clobbered; they were presumably `DataClass` and `DataClassType`.
SCREAMING_SNAKE_CASE : Union[str, Any] = NewType('''DataClass''', Any)
SCREAMING_SNAKE_CASE : Tuple = NewType('''DataClassType''', Any)
def __UpperCAmelCase ( v ) -> bool:
    """Parse a truthy/falsy command-line string into a bool.

    Booleans pass through unchanged; ``yes/true/t/y/1`` (case-insensitive) map
    to True and ``no/false/f/n/0`` to False.  Anything else raises
    ``argparse.ArgumentTypeError`` so argparse reports a clean usage error.

    Fix: the obfuscated original named the parameter ``snake_case_`` but read an
    undefined ``v``, and called ``isinstance(x, x)``; restore the intended bool
    passthrough and bind the parameter as ``v``.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."""
        )
def __UpperCAmelCase ( choices : list ) -> Callable[[str], Any]:
    """Build an argparse ``type=`` converter for a closed set of choices.

    The returned callable maps the string form of each choice back to the
    original choice object, and returns the argument unchanged when it matches
    no choice (argparse's own ``choices=`` check then rejects it).

    Fix: the obfuscated original keyed every entry on ``str(<the whole list>)``
    and the lambda read undefined names; key each choice on its own string form.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def __UpperCAmelCase (
    *,
    aliases : Union[str, List[str]] = None ,
    help : str = None ,  # noqa: A002 -- shadows the builtin on purpose, matching dataclass-field style
    default : Any = dataclasses.MISSING ,
    default_factory : Callable[[], Any] = dataclasses.MISSING ,
    metadata : dict = None ,
    **kwargs ,
) -> dataclasses.Field:
    """Convenience wrapper around ``dataclasses.field`` for argparse dataclasses.

    Folds ``aliases`` and ``help`` into the field's ``metadata`` dict (where the
    parser's field handler pops them back out) and forwards everything else to
    ``dataclasses.field``.

    Fix: the obfuscated signature repeated the same keyword-only parameter name
    five times (a SyntaxError) while the body read ``aliases``/``help``/
    ``metadata``; the parameter names are restored from the body, and the
    metadata-dict assignments (dropped into a throwaway local) are restored as
    key writes.
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class __lowerCamelCase ( __lowercase ):
    """Argument parser that derives its CLI arguments from dataclass fields.

    NOTE(review): obfuscation damage throughout -- the base class ``__lowercase``
    is undefined (presumably ``argparse.ArgumentParser``); most methods are named
    ``A__`` so later definitions shadow earlier ones; several signatures repeat
    the parameter name ``lowerCamelCase`` (a SyntaxError); and many assignments
    target a throwaway ``_lowerCAmelCase`` while later lines read the original
    local names (``kwargs``, ``field``, ``aliases``, ``origin_type``, ...).
    Restore the original identifiers before running.
    """
    # NOTE(review): presumably the `dataclass_types` attribute declaration,
    # obfuscated to a constant.
    __UpperCamelCase = 42
    def __init__(self , lowerCamelCase , **lowerCamelCase ):
        """Register one or more dataclass types as argument sources.

        NOTE(review): `self, lowerCamelCase, **lowerCamelCase` duplicates the
        parameter name -- a SyntaxError.
        """
        if "formatter_class" not in kwargs:
            # show defaults in --help unless the caller chose a formatter
            _lowerCAmelCase = ArgumentDefaultsHelpFormatter
        super().__init__(**lowerCamelCase )
        if dataclasses.is_dataclass(lowerCamelCase ):
            # accept a single dataclass as well as an iterable of them
            _lowerCAmelCase = [dataclass_types]
        _lowerCAmelCase = list(lowerCamelCase )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(lowerCamelCase )
    @staticmethod
    def A__ (lowerCamelCase , lowerCamelCase ):
        """Translate one dataclass field into a `parser.add_argument` call."""
        _lowerCAmelCase = f"""--{field.name}"""
        _lowerCAmelCase = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , lowerCamelCase ):
            # a string type means get_type_hints() was never applied upstream
            raise RuntimeError(
                """Unresolved type detected, which should have been done with the help of """
                """`typing.get_type_hints` method by default""" )
        _lowerCAmelCase = kwargs.pop("""aliases""" , [] )
        if isinstance(lowerCamelCase , lowerCamelCase ):
            _lowerCAmelCase = [aliases]
        _lowerCAmelCase = getattr(field.type , """__origin__""" , field.type )
        if origin_type is Union or (hasattr(lowerCamelCase , """UnionType""" ) and isinstance(lowerCamelCase , types.UnionType )):
            # only Optional[X] (or X | None) is representable as a single argument
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(lowerCamelCase ) not in field.type.__args__
            ):
                raise ValueError(
                    """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
                    """ the argument parser only supports one type per argument."""
                    f""" Problem encountered in field '{field.name}'.""" )
            if type(lowerCamelCase ) not in field.type.__args__:
                # filter `str` in Union
                _lowerCAmelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                _lowerCAmelCase = getattr(field.type , """__origin__""" , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                _lowerCAmelCase = (
                    field.type.__args__[0] if isinstance(lowerCamelCase , field.type.__args__[1] ) else field.type.__args__[1]
                )
                _lowerCAmelCase = getattr(field.type , """__origin__""" , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        _lowerCAmelCase = {}
        if origin_type is Literal or (isinstance(field.type , lowerCamelCase ) and issubclass(field.type , lowerCamelCase )):
            # closed choice sets: Literal[...] values or Enum members
            if origin_type is Literal:
                _lowerCAmelCase = field.type.__args__
            else:
                _lowerCAmelCase = [x.value for x in field.type]
            _lowerCAmelCase = make_choice_type_function(kwargs["""choices"""] )
            if field.default is not dataclasses.MISSING:
                _lowerCAmelCase = field.default
            else:
                _lowerCAmelCase = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            _lowerCAmelCase = copy(lowerCamelCase )
            # Hack because type=bool in argparse does not behave as we want.
            _lowerCAmelCase = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                _lowerCAmelCase = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                _lowerCAmelCase = default
                # This tells argparse we accept 0 or 1 value after --field_name
                _lowerCAmelCase = """?"""
                # This is the value that will get picked if we do --field_name (without value)
                _lowerCAmelCase = True
        elif isclass(lowerCamelCase ) and issubclass(lowerCamelCase , lowerCamelCase ):
            # list-typed fields become repeatable arguments
            _lowerCAmelCase = field.type.__args__[0]
            _lowerCAmelCase = """+"""
            if field.default_factory is not dataclasses.MISSING:
                _lowerCAmelCase = field.default_factory()
            elif field.default is dataclasses.MISSING:
                _lowerCAmelCase = True
        else:
            _lowerCAmelCase = field.type
            if field.default is not dataclasses.MISSING:
                _lowerCAmelCase = field.default
            elif field.default_factory is not dataclasses.MISSING:
                _lowerCAmelCase = field.default_factory()
            else:
                _lowerCAmelCase = True
        parser.add_argument(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            _lowerCAmelCase = False
            parser.add_argument(f"""--no_{field.name}""" , action="""store_false""" , dest=field.name , **lowerCamelCase )
    def A__ (self , lowerCamelCase ):
        """Add arguments for every init field of one dataclass type."""
        if hasattr(lowerCamelCase , """_argument_group_name""" ):
            # dataclasses can opt into their own argument group
            _lowerCAmelCase = self.add_argument_group(dtype._argument_group_name )
        else:
            _lowerCAmelCase = self
        try:
            _lowerCAmelCase = get_type_hints(lowerCamelCase )
        except NameError:
            raise RuntimeError(
                f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                """removing line of `from __future__ import annotations` which opts in Postponed """
                """Evaluation of Annotations (PEP 563)""" )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCamelCase ):
                _lowerCAmelCase = """.""".join(map(lowerCamelCase , sys.version_info[:3] ) )
                raise RuntimeError(
                    f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    """line of `from __future__ import annotations` which opts in union types as """
                    """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
                    """support Python versions that lower than 3.10, you need to use """
                    """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
                    """`X | None`.""" ) from ex
            raise
        for field in dataclasses.fields(lowerCamelCase ):
            if not field.init:
                continue
            _lowerCAmelCase = type_hints[field.name]
            self._parse_dataclass_field(lowerCamelCase , lowerCamelCase )
    def A__ (self , lowerCamelCase=None , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=None , ):
        """Parse CLI args (optionally merged with .args files) into dataclass instances.

        NOTE(review): duplicate parameter names (SyntaxError); presumably
        (args, return_remaining_strings, look_for_args_file, args_filename,
        args_file_flag).
        """
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            _lowerCAmelCase = []
            if args_filename:
                args_files.append(Path(lowerCamelCase ) )
            elif look_for_args_file and len(sys.argv ):
                # default convention: "<script>.args" next to the entry script
                args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                _lowerCAmelCase = ArgumentParser()
                args_file_parser.add_argument(lowerCamelCase , type=lowerCamelCase , action="""append""" )
                # Use only remaining args for further parsing (remove the args_file_flag)
                _lowerCAmelCase , _lowerCAmelCase = args_file_parser.parse_known_args(args=lowerCamelCase )
                _lowerCAmelCase = vars(lowerCamelCase ).get(args_file_flag.lstrip("""-""" ) , lowerCamelCase )
                if cmd_args_file_paths:
                    args_files.extend([Path(lowerCamelCase ) for p in cmd_args_file_paths] )
            _lowerCAmelCase = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            _lowerCAmelCase = file_args + args if args is not None else file_args + sys.argv[1:]
        _lowerCAmelCase , _lowerCAmelCase = self.parse_known_args(args=lowerCamelCase )
        _lowerCAmelCase = []
        for dtype in self.dataclass_types:
            # pull this dataclass's fields out of the flat namespace
            _lowerCAmelCase = {f.name for f in dataclasses.fields(lowerCamelCase ) if f.init}
            _lowerCAmelCase = {k: v for k, v in vars(lowerCamelCase ).items() if k in keys}
            for k in keys:
                delattr(lowerCamelCase , lowerCamelCase )
            _lowerCAmelCase = dtype(**lowerCamelCase )
            outputs.append(lowerCamelCase )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(lowerCamelCase )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
            return (*outputs,)
    def A__ (self , lowerCamelCase , lowerCamelCase = False ):
        """Instantiate the dataclasses from a plain dict instead of the CLI."""
        _lowerCAmelCase = set(args.keys() )
        _lowerCAmelCase = []
        for dtype in self.dataclass_types:
            _lowerCAmelCase = {f.name for f in dataclasses.fields(lowerCamelCase ) if f.init}
            _lowerCAmelCase = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            _lowerCAmelCase = dtype(**lowerCamelCase )
            outputs.append(lowerCamelCase )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(lowerCamelCase )}""" )
        return tuple(lowerCamelCase )
    def A__ (self , lowerCamelCase , lowerCamelCase = False ):
        """Load a JSON file and delegate to the dict-based parsing."""
        with open(Path(lowerCamelCase ) , encoding="""utf-8""" ) as open_json_file:
            _lowerCAmelCase = json.loads(open_json_file.read() )
        _lowerCAmelCase = self.parse_dict(lowerCamelCase , allow_extra_keys=lowerCamelCase )
        return tuple(lowerCamelCase )
    def A__ (self , lowerCamelCase , lowerCamelCase = False ):
        """Load a YAML file and delegate to the dict-based parsing."""
        _lowerCAmelCase = self.parse_dict(yaml.safe_load(Path(lowerCamelCase ).read_text() ) , allow_extra_keys=lowerCamelCase )
        return tuple(lowerCamelCase )
'''Lazy-import scaffolding for the Graphormer model package.

Declares the import structure up front and defers the heavy submodule imports
until an attribute is actually accessed (standard Transformers `_LazyModule`
pattern).  Static type checkers still see the real imports via TYPE_CHECKING.

NOTE(review): obfuscation collapsed `_import_structure` (and the conditional
modeling additions) onto `_lowerCAmelCase`, so the `_import_structure` name read
by `_LazyModule` at the bottom is undefined, and the torch-guarded list is never
attached to the structure dict.  Restore those names before running.
'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Always-importable (config-only) entries.
_lowerCAmelCase = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects
    pass
else:
    # torch present: also expose the modeling classes
    _lowerCAmelCase = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    # real imports for static analysis only; never executed at runtime
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )
else:
    import sys
    # replace this module with a lazy proxy that imports submodules on demand
    _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 318 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch ``BertForPreTraining`` checkpoint.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: JSON file describing the pre-trained model architecture.
        pytorch_dump_path: Where to write the converted PyTorch state dict.
    """
    # Initialise an empty PyTorch model from the architecture description.
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
"""simple docstring"""
_lowerCAmelCase : Any = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesised infix expression with Dijkstra's two-stack algorithm.

    Args:
        equation: Expression using single-digit operands, ``+ - * /`` and
            full parenthesisation, e.g. ``"(5 + ((4 * 2) * (2 + 3)))"``.

    Returns:
        The value of the expression (top of the operand stack).
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()  # Stack[int]
    operator_stack = Stack()  # Stack[str]

    for i in equation:
        if i.isdigit():
            # RULE 1: operands go straight onto the operand stack.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go onto the operator stack.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on a closing bracket, apply the top operator to the top
            # two operands (second pop is the left-hand operand).
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the remaining operand is the result.
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    """Tiny Linear -> BatchNorm1d -> Linear model used as a fixture by the hook tests below.

    NOTE(review): the incoming code assigned both Linear layers to the same
    attribute, used the non-existent ``nn.BatchNormad`` and did not name the
    forward method ``forward``; restored so the fixture is shape-consistent
    and callable, while keeping the first layer's ``lineara`` attribute name
    that the test methods reference.
    """

    def __init__(self):
        super().__init__()
        self.lineara = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linearb = nn.Linear(4, 5)

    def forward(self, x):
        # (batch, 3) -> (batch, 4) -> normalised -> (batch, 5)
        return self.linearb(self.batchnorm(self.lineara(x)))
class PreForwardHook(ModelHook):
    """Hook that increments the first positional forward argument by one.

    Matches the `accelerate.hooks.ModelHook.pre_forward` contract: receives
    the module plus the forward args/kwargs and returns the (possibly
    modified) args/kwargs pair.
    """

    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    """Hook that increments the module output by one after the forward call.

    Matches the `accelerate.hooks.ModelHook.post_forward` contract: receives
    the module and its output and returns the (possibly modified) output.
    """

    def post_forward(self, module, output):
        return output + 1
class _A ( unittest.TestCase ):
    """Tests for `accelerate.hooks`: attaching/removing hooks, pre/post forward
    modification, and device-alignment/offload behaviour.

    NOTE(review): the local variable names and many argument placeholders
    (`__snake_case`, `__magic_name__`) look obfuscation-damaged — the
    assertions reference names (`test_model`, `output`, `outputa`, `model`)
    that the assignments no longer bind. Verify against the upstream
    accelerate test suite before trusting any individual assertion.
    """
    def lowercase__ ( self : Dict ) -> Any:
        """Adding a hook sets `_hf_hook`/`_old_forward`; removal restores the module."""
        __snake_case : int = ModelForTest()
        __snake_case : Tuple = ModelHook()
        add_hook_to_module(__magic_name__ , __magic_name__ )
        self.assertEqual(test_model._hf_hook , __magic_name__ )
        self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , """forward""" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
        remove_hook_from_module(__magic_name__ )
        self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
        self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
    def lowercase__ ( self : Tuple ) -> List[str]:
        """Appending a second hook wraps both in a container exposing `.hooks`."""
        __snake_case : List[Any] = ModelForTest()
        __snake_case : Optional[int] = ModelHook()
        add_hook_to_module(__magic_name__ , __magic_name__ )
        add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
        self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
        self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
        self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , """forward""" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
        remove_hook_from_module(__magic_name__ )
        self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
        self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
    def lowercase__ ( self : str ) -> Union[str, Any]:
        """A pre-forward hook shifts the input; re-attaching replaces, SequentialHook chains."""
        __snake_case : List[Any] = ModelForTest()
        __snake_case : Any = torch.randn(2 , 3 )
        __snake_case : str = test_model(x + 1 )
        __snake_case : int = test_model(x + 2 )
        __snake_case : Union[str, Any] = PreForwardHook()
        add_hook_to_module(__magic_name__ , __magic_name__ )
        __snake_case : int = test_model(__magic_name__ )
        self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        __snake_case : Optional[int] = PreForwardHook()
        add_hook_to_module(__magic_name__ , __magic_name__ )
        __snake_case : List[Any] = test_model(__magic_name__ )
        self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        __snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
        add_hook_to_module(__magic_name__ , __magic_name__ )
        __snake_case : List[str] = test_model(__magic_name__ )
        assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
    def lowercase__ ( self : Union[str, Any] ) -> List[str]:
        """A post-forward hook shifts the output; re-attaching replaces, SequentialHook chains."""
        __snake_case : Union[str, Any] = ModelForTest()
        __snake_case : str = torch.randn(2 , 3 )
        __snake_case : Any = test_model(__magic_name__ )
        __snake_case : Any = PostForwardHook()
        add_hook_to_module(__magic_name__ , __magic_name__ )
        __snake_case : Any = test_model(__magic_name__ )
        self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        __snake_case : Any = PostForwardHook()
        add_hook_to_module(__magic_name__ , __magic_name__ )
        __snake_case : Dict = test_model(__magic_name__ )
        self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        __snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
        add_hook_to_module(__magic_name__ , __magic_name__ )
        __snake_case : int = test_model(__magic_name__ )
        assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
    def lowercase__ ( self : str ) -> int:
        """Hook output keeps grad by default; a no-grad flag disables requires_grad."""
        __snake_case : Union[str, Any] = ModelForTest()
        __snake_case : int = torch.randn(2 , 3 )
        __snake_case : Any = test_model(__magic_name__ )
        __snake_case : Dict = PostForwardHook()
        add_hook_to_module(__magic_name__ , __magic_name__ )
        __snake_case : List[Any] = test_model(__magic_name__ )
        self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
        self.assertTrue(outputa.requires_grad )
        __snake_case : Dict = True
        __snake_case : int = test_model(__magic_name__ )
        self.assertFalse(outputa.requires_grad )
    @require_multi_gpu
    def lowercase__ ( self : Tuple ) -> List[Any]:
        """AlignDevicesHook places submodules on distinct GPUs; io_same_device restores outputs."""
        __snake_case : Tuple = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
        self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
        self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
        # We can still make a forward pass. The input does not need to be on any particular device
        __snake_case : Tuple = torch.randn(2 , 3 )
        __snake_case : Union[str, Any] = model(__magic_name__ )
        self.assertEqual(output.device , torch.device(1 ) )
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
        __snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
        __snake_case : Any = model(__magic_name__ )
        self.assertEqual(output.device , torch.device(0 ) )
    def lowercase__ ( self : Union[str, Any] ) -> str:
        """Per-module AlignDevicesHook offload moves weights to meta; removal restores them."""
        __snake_case : int = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        __snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
        add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        __snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
        self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
        __snake_case : Dict = torch.randn(2 , 3 )
        __snake_case : Any = model(__magic_name__ )
        self.assertEqual(output.device , __magic_name__ )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        __snake_case : int = {
            """execution_device""": 0 if torch.cuda.is_available() else """cpu""",
            """offload""": True,
            """offload_buffers""": True,
        }
        add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        __snake_case : str = torch.randn(2 , 3 )
        __snake_case : str = model(__magic_name__ )
        self.assertEqual(output.device , __magic_name__ )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
    def lowercase__ ( self : Dict ) -> str:
        """attach_align_device_hook offloads the whole model; removal restores weights."""
        __snake_case : Tuple = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        __snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        __snake_case : Union[str, Any] = torch.device(__magic_name__ )
        self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
        __snake_case : Optional[int] = torch.randn(2 , 3 )
        __snake_case : Dict = model(__magic_name__ )
        self.assertEqual(output.device , __magic_name__ )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__magic_name__ )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        __snake_case : Dict = torch.randn(2 , 3 )
        __snake_case : Optional[int] = model(__magic_name__ )
        self.assertEqual(output.device , __magic_name__ )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__magic_name__ )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
    def lowercase__ ( self : Any ) -> Union[str, Any]:
        """attach_align_device_hook with an explicit weights_map (state_dict) round-trips."""
        __snake_case : Any = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        __snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(
            __magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        __snake_case : List[str] = torch.device(__magic_name__ )
        self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
        __snake_case : Tuple = torch.randn(2 , 3 )
        __snake_case : Optional[Any] = model(__magic_name__ )
        self.assertEqual(output.device , __magic_name__ )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__magic_name__ )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(
            __magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        __snake_case : Dict = torch.randn(2 , 3 )
        __snake_case : Dict = model(__magic_name__ )
        self.assertEqual(output.device , __magic_name__ )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__magic_name__ )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 26 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __lowercase ( unittest.TestCase ):
    """Smoke tests for `TensorFlowBenchmark`: run tiny models through the
    benchmark harness and check that timing/memory results are populated.

    NOTE(review): the boolean benchmark arguments and several referenced
    names (`_lowerCAmelCase`, `results`, `self.check_results_dict_not_empty`)
    look obfuscation-damaged and do not resolve as written — reconstruct
    from the upstream test file before relying on individual cases.
    """
    def UpperCamelCase__ ( self , UpperCamelCase ) -> Optional[Any]:
        # Helper: every (batch_size, sequence_length) cell of each model's
        # result dict should be populated.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                __a = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(_lowerCAmelCase )
    def UpperCamelCase__ ( self ) -> List[Any]:
        """Inference benchmark (eager mode) on a tiny GPT-2 yields time/memory results."""
        __a = 'sshleifer/tiny-gpt2'
        __a = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_lowerCAmelCase , multi_process=_lowerCAmelCase , )
        __a = TensorFlowBenchmark(_lowerCAmelCase )
        __a = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase__ ( self ) -> List[str]:
        """Inference benchmark with only_pretrain_model on a tiny classifier."""
        __a = 'sgugger/tiny-distilbert-classification'
        __a = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , only_pretrain_model=_lowerCAmelCase , )
        __a = TensorFlowBenchmark(_lowerCAmelCase )
        __a = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase__ ( self ) -> int:
        """Inference benchmark (graph mode) on a tiny GPT-2."""
        __a = 'sshleifer/tiny-gpt2'
        __a = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
        __a = TensorFlowBenchmark(_lowerCAmelCase )
        __a = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase__ ( self ) -> Optional[int]:
        """Inference benchmark (eager) with an explicit config object."""
        __a = 'sshleifer/tiny-gpt2'
        __a = AutoConfig.from_pretrained(_lowerCAmelCase )
        __a = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_lowerCAmelCase , multi_process=_lowerCAmelCase , )
        __a = TensorFlowBenchmark(_lowerCAmelCase , [config] )
        __a = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase__ ( self ) -> List[str]:
        """Inference benchmark (graph) with an explicit config object."""
        __a = 'sshleifer/tiny-gpt2'
        __a = AutoConfig.from_pretrained(_lowerCAmelCase )
        __a = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
        __a = TensorFlowBenchmark(_lowerCAmelCase , [config] )
        __a = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase__ ( self ) -> List[Any]:
        """Training benchmark on a tiny GPT-2 yields train time/memory results."""
        __a = 'sshleifer/tiny-gpt2'
        __a = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
        __a = TensorFlowBenchmark(_lowerCAmelCase )
        __a = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def UpperCamelCase__ ( self ) -> List[str]:
        """Training benchmark with an explicit config object."""
        __a = 'sshleifer/tiny-gpt2'
        __a = AutoConfig.from_pretrained(_lowerCAmelCase )
        __a = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
        __a = TensorFlowBenchmark(_lowerCAmelCase , [config] )
        __a = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def UpperCamelCase__ ( self ) -> Optional[Any]:
        """Inference benchmark on a tiny encoder-decoder (T5) with a config."""
        __a = 'patrickvonplaten/t5-tiny-random'
        __a = AutoConfig.from_pretrained(_lowerCAmelCase )
        __a = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
        __a = TensorFlowBenchmark(_lowerCAmelCase , configs=[config] )
        __a = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
    def UpperCamelCase__ ( self ) -> str:
        """XLA-compiled inference benchmark (GPU only)."""
        __a = 'sshleifer/tiny-gpt2'
        __a = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_lowerCAmelCase , multi_process=_lowerCAmelCase , )
        __a = TensorFlowBenchmark(_lowerCAmelCase )
        __a = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def UpperCamelCase__ ( self ) -> Any:
        """save_to_csv writes the time/memory/env CSV files into the temp dir."""
        __a = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            __a = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=_lowerCAmelCase , save_to_csv=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowerCAmelCase , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(_lowerCAmelCase , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(_lowerCAmelCase , 'env.csv' ) , multi_process=_lowerCAmelCase , )
            __a = TensorFlowBenchmark(_lowerCAmelCase )
            benchmark.run()
            self.assertTrue(Path(os.path.join(_lowerCAmelCase , 'inf_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCAmelCase , 'inf_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCAmelCase , 'env.csv' ) ).exists() )
    def UpperCamelCase__ ( self ) -> Dict:
        """Line-by-line memory tracing produces a summary and a log file."""
        __a = 'sshleifer/tiny-gpt2'
        def _check_summary_is_not_empty(UpperCamelCase ):
            # Summaries must expose all four accounting views.
            self.assertTrue(hasattr(_lowerCAmelCase , 'sequential' ) )
            self.assertTrue(hasattr(_lowerCAmelCase , 'cumulative' ) )
            self.assertTrue(hasattr(_lowerCAmelCase , 'current' ) )
            self.assertTrue(hasattr(_lowerCAmelCase , 'total' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            __a = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowerCAmelCase , 'log.txt' ) , log_print=_lowerCAmelCase , trace_memory_line_by_line=_lowerCAmelCase , eager_mode=_lowerCAmelCase , multi_process=_lowerCAmelCase , )
            __a = TensorFlowBenchmark(_lowerCAmelCase )
            __a = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(_lowerCAmelCase , 'log.txt' ) ).exists() )
| 709 |
"""`datasets` metric computing the Mahalanobis distance."""
import numpy as np

import datasets


# NOTE(review): the incoming code bound all three doc blocks to a single
# reassigned name; restored the conventional `_DESCRIPTION` / `_CITATION` /
# `_KWARGS_DESCRIPTION` names that the metric class below consumes.
_DESCRIPTION = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
_CITATION = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
_KWARGS_DESCRIPTION = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
    """Metric computing the Mahalanobis distance of each row of ``X`` with
    respect to a reference distribution."""

    def _info(self):
        # `datasets.Metric` dispatches to `_info` (metadata) and `_compute`
        # (the calculation); the canonical method names are restored here so
        # that contract is actually satisfied.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return ``{"mahalanobis": ndarray}`` with one distance per row of ``X``.

        Raises:
            ValueError: if ``X`` or ``reference_distribution`` is not 2D, or
                the reference distribution has fewer than two samples.
        """
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Singular covariance matrix: fall back to the pseudo-inverse.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 490 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# Model type -> image processor class name. Consumed by `_LazyAutoMapping`
# below and scanned by the resolver helpers further down, which reference
# these as `IMAGE_PROCESSOR_MAPPING_NAMES` / `IMAGE_PROCESSOR_MAPPING`.
# NOTE(review): the incoming code bound all three values to one reassigned
# name; the duplicated ('mobilevit', ...) entry was also dropped.
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ('align', 'EfficientNetImageProcessor'),
        ('beit', 'BeitImageProcessor'),
        ('bit', 'BitImageProcessor'),
        ('blip', 'BlipImageProcessor'),
        ('blip-2', 'BlipImageProcessor'),
        ('bridgetower', 'BridgeTowerImageProcessor'),
        ('chinese_clip', 'ChineseCLIPImageProcessor'),
        ('clip', 'CLIPImageProcessor'),
        ('clipseg', 'ViTImageProcessor'),
        ('conditional_detr', 'ConditionalDetrImageProcessor'),
        ('convnext', 'ConvNextImageProcessor'),
        ('convnextv2', 'ConvNextImageProcessor'),
        ('cvt', 'ConvNextImageProcessor'),
        ('data2vec-vision', 'BeitImageProcessor'),
        ('deformable_detr', 'DeformableDetrImageProcessor'),
        ('deit', 'DeiTImageProcessor'),
        ('deta', 'DetaImageProcessor'),
        ('detr', 'DetrImageProcessor'),
        ('dinat', 'ViTImageProcessor'),
        ('donut-swin', 'DonutImageProcessor'),
        ('dpt', 'DPTImageProcessor'),
        ('efficientformer', 'EfficientFormerImageProcessor'),
        ('efficientnet', 'EfficientNetImageProcessor'),
        ('flava', 'FlavaImageProcessor'),
        ('focalnet', 'BitImageProcessor'),
        ('git', 'CLIPImageProcessor'),
        ('glpn', 'GLPNImageProcessor'),
        ('groupvit', 'CLIPImageProcessor'),
        ('imagegpt', 'ImageGPTImageProcessor'),
        ('instructblip', 'BlipImageProcessor'),
        ('layoutlmv2', 'LayoutLMv2ImageProcessor'),
        ('layoutlmv3', 'LayoutLMv3ImageProcessor'),
        ('levit', 'LevitImageProcessor'),
        ('mask2former', 'Mask2FormerImageProcessor'),
        ('maskformer', 'MaskFormerImageProcessor'),
        ('mgp-str', 'ViTImageProcessor'),
        ('mobilenet_v1', 'MobileNetV1ImageProcessor'),
        ('mobilenet_v2', 'MobileNetV2ImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
        ('mobilevitv2', 'MobileViTImageProcessor'),
        ('nat', 'ViTImageProcessor'),
        ('oneformer', 'OneFormerImageProcessor'),
        ('owlvit', 'OwlViTImageProcessor'),
        ('perceiver', 'PerceiverImageProcessor'),
        ('pix2struct', 'Pix2StructImageProcessor'),
        ('poolformer', 'PoolFormerImageProcessor'),
        ('regnet', 'ConvNextImageProcessor'),
        ('resnet', 'ConvNextImageProcessor'),
        ('sam', 'SamImageProcessor'),
        ('segformer', 'SegformerImageProcessor'),
        ('swiftformer', 'ViTImageProcessor'),
        ('swin', 'ViTImageProcessor'),
        ('swin2sr', 'Swin2SRImageProcessor'),
        ('swinv2', 'ViTImageProcessor'),
        ('table-transformer', 'DetrImageProcessor'),
        ('timesformer', 'VideoMAEImageProcessor'),
        ('tvlt', 'TvltImageProcessor'),
        ('upernet', 'SegformerImageProcessor'),
        ('van', 'ConvNextImageProcessor'),
        ('videomae', 'VideoMAEImageProcessor'),
        ('vilt', 'ViltImageProcessor'),
        ('vit', 'ViTImageProcessor'),
        ('vit_hybrid', 'ViTHybridImageProcessor'),
        ('vit_mae', 'ViTImageProcessor'),
        ('vit_msn', 'ViTImageProcessor'),
        ('xclip', 'CLIPImageProcessor'),
        ('yolos', 'YolosImageProcessor'),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _snake_case(class_name: str):
    """Resolve an image-processor class name (e.g. ``"CLIPImageProcessor"``) to the class object.

    Returns ``None`` when the name cannot be resolved.

    NOTE(review): this helper is shadowed by the identically named function
    below (upstream the two have distinct names); renaming it here would
    change the module interface, so only the broken parameter wiring and the
    wrong ``-> int`` annotation were repaired.
    """
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                # Name listed for this model type but missing from the module
                # (e.g. optional dependency not installed) -- keep searching.
                continue

    # Maybe the class was registered dynamically at runtime.
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def _snake_case(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the image-processor configuration of a pretrained checkpoint as a dict.

    Returns ``{}`` when the checkpoint has no dedicated image-processor
    configuration file, so callers can fall back to the model config.
    The original signature declared eight parameters all named
    ``lowerCamelCase__`` (a SyntaxError); names restored per their types/defaults.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,  # NOTE(review): config file-name constant also used below — confirm
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class lowercase__:
    """AutoImageProcessor-style factory: instantiates the right image-processor
    class for a pretrained checkpoint.

    Cannot be instantiated directly; use :meth:`from_pretrained`.
    NOTE(review): in the original mangled text both methods were named
    ``UpperCAmelCase__`` (the second shadowed the first) and most local bindings
    were lost; names restored from the values they are read as below.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate an image processor for *pretrained_model_name_or_path*.

        Resolution order: explicit ``image_processor_type`` in the image-processor
        config, an ``AutoImageProcessor`` entry in ``auto_map`` (remote code), a
        legacy feature-extractor config, then the model config via
        ``IMAGE_PROCESSOR_MAPPING``.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration." )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration." )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):  # NOTE(review): PretrainedConfig assumed imported at file top — confirm
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            # NOTE(review): in this file the helper was renamed `_snake_case`; the
            # original call target name is kept here as found in the source.
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                # local dynamic module: make it registerable for future auto classes
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
            f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image-processor class for *config_class* in the auto mapping."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
| 153 |
"""simple docstring"""
import math
import qiskit
def _snake_case(input_a: int = 1, input_b: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """Build and simulate a 1-bit quantum full adder.

    Each input may be 0, 1 or 2; the value 2 puts the corresponding qubit into
    superposition with a Hadamard gate.  Qubits 0-2 hold the inputs, qubit 3 is
    the carry-out ancilla.  Returns the measurement counts of the
    (sum, carry_out) qubits over 1000 simulator shots.

    Note: the original signature declared all three parameters with the same
    name (a SyntaxError); they have been given distinct names.
    """
    if isinstance(input_a, str) or isinstance(input_b, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_a, input_b, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 153 | 1 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_:
    """Weighted directed graph.

    Edges are stored in an adjacency mapping ``{u: [[w, v], ...]}`` where ``w``
    is the edge weight and ``v`` the target vertex.  Method names restored from
    the internal calls (``self.add_pair`` / ``self.dfs`` / ``self.bfs``): in the
    original mangled text every method was named ``_SCREAMING_SNAKE_CASE``.
    """

    def __init__(self):
        # adjacency mapping: vertex -> list of [weight, target] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add edge u -> v with optional weight *w*; duplicates are ignored."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # make sure the target vertex exists even if it has no outgoing edges
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return every vertex of the graph."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove every edge u -> v; silently ignores missing input."""
        if self.graph.get(u):
            # iterate over a copy: removing while iterating would skip entries
            for edge in list(self.graph[u]):
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from *s* (default: first vertex).

        Returns the list of visited vertices; stops early (including *d*)
        when the destination is reached.  ``d=-1`` means "no destination".
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with *c* random vertices (random count if c == -1)."""
        if c == -1:
            c = floor(random() * 1_0000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from *s* (default: first vertex); returns visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges pointing at *u*."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving *u*."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological ordering starting from *s* (default: first vertex)."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the list of vertices that participate in a cycle (may be empty)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back-edge found: collect the stack segment forming the cycle
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a cycle is detected, False otherwise."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                # back-edge to a non-top vertex: cycle confirmed
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds spent by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds spent by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class lowerCAmelCase_:
    """Weighted undirected graph.

    Every edge u-v is stored in both directions of the adjacency mapping:
    ``{u: [[w, v], ...], v: [[w, u], ...]}``.  Method names restored from the
    internal calls; in the original mangled text every method was named
    ``_SCREAMING_SNAKE_CASE``.
    """

    def __init__(self):
        # adjacency mapping: vertex -> list of [weight, neighbour] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the undirected edge u-v with optional weight *w*; no duplicates."""
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the edge u-v in both directions; missing input is ignored."""
        if self.graph.get(u):
            # iterate over a copy: removing while iterating would skip entries
            for edge in list(self.graph[u]):
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in list(self.graph[v]):
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from *s* (default: first vertex).

        Returns the visit order; stops early (including *d*) when the
        destination is reached.  ``d=-1`` means "no destination".
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with *c* random vertices (random count if c == -1)."""
        if c == -1:
            c = floor(random() * 1_0000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from *s* (default: first vertex); returns visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to *u*."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the list of vertices that participate in a cycle (may be empty)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back-edge found: collect the stack segment forming the cycle
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a cycle is detected, False otherwise."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                # back-edge to a non-top vertex: cycle confirmed
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every vertex of the graph."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds spent by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds spent by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
# Optional-dependency probe: record whether `cookiecutter` is importable.
try:
    from cookiecutter.main import cookiecutter
    _lowercase = True
except ImportError:
    _lowercase = False
# NOTE(review): this immediately rebinds the flag above to a logger, and the
# command class below reads `_has_cookiecutter` (a name never defined here) —
# the bindings look mechanically renamed; confirm against the upstream module.
_lowercase = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _snake_case(snake_case__: Namespace):
    """argparse factory: build the add-new-model command from the parsed args.

    Note: the original body read an undefined name ``args`` and returned
    ``AddNewModelCommand``, which does not exist in this file; the command
    class defined below is ``lowerCAmelCase_``.
    """
    args = snake_case__
    return lowerCAmelCase_(args.testing, args.testing_file, path=args.path)
class lowerCAmelCase_ ( _lowercase ):
    """`transformers-cli add-new-model` command: runs the cookiecutter template
    that scaffolds a new model, then moves the generated configuration,
    per-framework modeling/test files, doc page and tokenizers into the
    transformers tree and splices marked snippets into existing files.

    NOTE(review): this class looks mechanically renamed.  Its base
    ``_lowercase`` is a module-level variable (last rebound to a logger above),
    not a command base class; three methods share the name
    ``_SCREAMING_SNAKE_CASE`` so only the last definition survives; the
    ``__init__`` parameters all share the name ``A_`` (a SyntaxError); and the
    ``A = ...`` assignments throughout do not bind the names read afterwards
    (``parser``, ``self._testing``, ``configuration``, ``model_dir``, ...).
    Confirm against the upstream implementation before relying on this code.
    """
    @staticmethod
    def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
        """Register the `add-new-model` sub-parser and its CLI flags."""
        # NOTE(review): the parser is bound to `A` but read as `add_new_model_parser`.
        A = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=A_ )
    def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]:
        """Store the testing flag, the testing config file and the template path."""
        A = testing        # NOTE(review): methods below read self._testing
        A = testing_file   # NOTE(review): methods below read self._testing_file
        A = path           # NOTE(review): methods below read self._path
    def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
        """Execute the command: run cookiecutter, relocate the generated files for
        the selected frameworks (PyTorch/TensorFlow/Flax) and splice marked code
        snippets into existing transformers sources."""
        warnings.warn(
            'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
            'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
            'checks, you should use `transformers-cli add-new-model-like` instead.' )
        if not _has_cookiecutter:
            raise ImportError(
                'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
                'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(A_ ) > 0:
            raise ValueError(
                'Several directories starting with `cookiecutter-template-` in current working directory. '
                'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
                'change your working directory.' )
        A = (
            Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(A_ ) )
        else:
            with open(self._testing_file ,'r' ) as configuration_file:
                A = json.load(A_ )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
        A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '/configuration.json' ,'r' ) as configuration_file:
            A = json.load(A_ )
        A = configuration['lowercase_modelname']
        A = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(F'{directory}/configuration.json' )
        A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        A = 'Flax' in generate_tensorflow_pytorch_and_flax
        A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(A_ ,exist_ok=A_ )
        os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
        # Tests require submodules as they have parent imports
        with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
            pass
        shutil.move(
            F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
        shutil.move(
            F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
        # Strip `# Copied from transformers.` markers from a generated file.
        def remove_copy_lines(A_ : int ):
            with open(A_ ,'r' ) as f:
                A = f.readlines()
            with open(A_ ,'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(A_ )
        if output_pytorch:
            if not self._testing:
                remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
            shutil.move(
                F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
            shutil.move(
                F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
        else:
            os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
            os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
            shutil.move(
                F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
            shutil.move(
                F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
        else:
            os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
            os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
        if output_flax:
            if not self._testing:
                remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
            shutil.move(
                F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
            shutil.move(
                F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
        else:
            os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
            os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
        shutil.move(
            F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
        shutil.move(
            F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
        shutil.move(
            F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp
        # Copy a file, inserting `lines_to_copy` below the marker line, atomically via a temp file.
        def replace(A_ : str ,A_ : str ,A_ : List[str] ):
            # Create temp file
            A , A = mkstemp()
            A = False
            with fdopen(A_ ,'w' ) as new_file:
                with open(A_ ) as old_file:
                    for line in old_file:
                        new_file.write(A_ )
                        if line_to_copy_below in line:
                            A = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(A_ )
            if not line_found:
                raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(A_ ,A_ )
            # Remove original file
            remove(A_ )
            # Move new file
            move(A_ ,A_ )
        # True when the marker line targets a framework that was not generated.
        def skip_units(A_ : Dict ):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        # Parse a to_replace_* file and splice its snippets into the target sources.
        def replace_in_files(A_ : Tuple ):
            with open(A_ ) as datafile:
                A = []
                A = False
                A = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        A = line.split('"' )[1]
                        A = skip_units(A_ )
                    elif "# Below: " in line and "##" not in line:
                        A = line.split('"' )[1]
                        A = skip_units(A_ )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(A_ ,A_ ,A_ )
                        A = []
                    elif "# Replace with" in line and "##" not in line:
                        A = []
                    elif "##" not in line:
                        lines_to_copy.append(A_ )
            remove(A_ )
        replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(A_ )
def lowercase_(SCREAMING_SNAKE_CASE_):
    """Minimal path sum from the top-left to the bottom-right of *grid*,
    moving only right or down.  The grid is accumulated in place.

    Raises TypeError when the grid (or its first row) is empty.
    The row-folding helper is inlined (`_fill_row`) because the original code
    called an undefined name `fill_row`.

    >>> lowercase_([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    grid = SCREAMING_SNAKE_CASE_
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    def _fill_row(current_row, row_above):
        # fold the accumulated row above into the current row, cell by cell
        current_row[0] += row_above[0]
        for cell_n in range(1, len(current_row)):
            current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
        return current_row

    # first row: only reachable by moving right, so it is a prefix sum
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        grid[row_n] = _fill_row(grid[row_n], row_above)
        row_above = grid[row_n]

    return grid[-1][-1]
def lowercase_(current_row, row_above):
    """Accumulate minimal path sums into *current_row* given the already
    accumulated *row_above*; mutates and returns *current_row*.

    Note: the original signature declared both parameters with the same name
    (a SyntaxError); the names the body reads have been restored.
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        # cheapest way into this cell: from the left or from above
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab; defaults to False when the
# availability check itself cannot be imported.
# NOTE(review): the menu class below reads `in_colab`, not `_snake_case` — this
# flag appears to have been renamed inconsistently; confirm against upstream.
_snake_case = False
try:
    _snake_case = _is_package_available('''google.colab''')
except ModuleNotFoundError:
    pass
@input.register
class UpperCAmelCase_ :
    """Interactive bullet-list menu rendered in the terminal.

    Highlights one entry of ``choices`` at ``self.position`` and reacts to key
    presses registered through the ``@input.mark`` decorators (arrow keys,
    digit keys, enter, Ctrl-C).  ``run`` blocks until a choice is made and
    returns its index.

    NOTE(review): this class looks mechanically renamed — ``__init__`` declares
    two parameters both named ``__A`` (a SyntaxError) and binds module-level
    names (``lowerCamelCase``) instead of the ``self.position`` /
    ``self.choices`` / ``self.prompt`` / ``self.arrow_char`` attributes the
    methods read; every handler shares the name ``_snake_case`` so only the
    last definition survives.  Confirm against the upstream implementation.
    """
    def __init__( self , __A = None , __A = [] ):
        """Store the prompt and choices and pick the platform arrow marker."""
        lowerCamelCase : Any = 0
        lowerCamelCase : Optional[int] = choices
        lowerCamelCase : Optional[int] = prompt
        if sys.platform == "win32":
            lowerCamelCase : Any = "*"
        else:
            lowerCamelCase : Union[str, Any] = "➔ "
    def _snake_case ( self , __A , __A = "" ):
        """Write the choice at *index*; green via writeColor on non-Windows terminals."""
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , __A )
        else:
            forceWrite(self.choices[index] , __A )
    def _snake_case ( self , __A ):
        """Print one choice row, prefixed with the arrow when it is the current position."""
        if index == self.position:
            forceWrite(F""" {self.arrow_char} """ )
            self.write_choice(__A )
        else:
            forceWrite(F"""    {self.choices[index]}""" )
        reset_cursor()
    def _snake_case ( self , __A , __A = 1 ):
        """Move the highlighted position one step and re-render the two affected rows."""
        lowerCamelCase : Optional[Any] = self.position
        if direction == Direction.DOWN:
            # ignore moves past the last entry
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            # ignore moves before the first entry
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(__A )
        move_cursor(__A , direction.name )
        self.print_choice(self.position )
    @input.mark(KEYMAP["up"] )
    def _snake_case ( self ):
        """Arrow-up handler."""
        self.move_direction(Direction.UP )
    @input.mark(KEYMAP["down"] )
    def _snake_case ( self ):
        """Arrow-down handler."""
        self.move_direction(Direction.DOWN )
    @input.mark(KEYMAP["newline"] )
    def _snake_case ( self ):
        """Enter handler: move the cursor below the menu and report the selection."""
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        return self.position
    @input.mark(KEYMAP["interrupt"] )
    def _snake_case ( self ):
        """Ctrl-C handler: restore the cursor position, then re-raise."""
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(__A )] for number in range(10 )] )
    def _snake_case ( self ):
        """Digit-key handler: jump the highlight to the typed index when valid."""
        lowerCamelCase : List[Any] = int(chr(self.current_selection ) )
        lowerCamelCase : Union[str, Any] = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , __A )
            else:
                return
        else:
            return
    def _snake_case ( self , __A = 0 ):
        """Render the menu and block until the user selects an entry; return its index.

        In Colab, falls back to reading a plain numeric index from stdin."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , "\n" )
        if in_colab:
            forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
        else:
            forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        lowerCamelCase : Any = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(__A )
            forceWrite("\n" )
        move_cursor(len(self.choices ) - self.position , "UP" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        lowerCamelCase : str = int(builtins.input() )
                    except ValueError:
                        lowerCamelCase : Optional[Any] = default_choice
                else:
                    lowerCamelCase : Optional[Any] = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # erase the rendered menu before returning
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , "UP" )
                        clear_line()
                    self.write_choice(__A , "\n" )
                    return choice
| 340 | 1 |
class _lowercase :
def __init__( self : Optional[Any] , lowerCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
A_ = n
A_ = [None] * self.n
A_ = 0 # index of the first element
A_ = 0
A_ = 0
def __len__( self : int ) -> int:
"""simple docstring"""
return self.size
def UpperCamelCase ( self : int ) -> bool:
"""simple docstring"""
return self.size == 0
def UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return False if self.is_empty() else self.array[self.front]
def UpperCamelCase ( self : Optional[int] , lowerCamelCase__ : int ) -> Union[str, Any]:
"""simple docstring"""
if self.size >= self.n:
raise Exception('''QUEUE IS FULL''' )
A_ = data
A_ = (self.rear + 1) % self.n
self.size += 1
return self
def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if self.size == 0:
raise Exception('''UNDERFLOW''' )
A_ = self.array[self.front]
A_ = None
A_ = (self.front + 1) % self.n
self.size -= 1
return temp
| 563 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module logger for the conversion script.
__lowercase = logging.get_logger(__name__)
# Mapping from fairseq parameter-name fragments to HF UniSpeech module paths.
# NOTE(review): this dict, the key list below and the logger above all rebind
# the same name `__lowercase`, while the functions below read `MAPPING`,
# `TOP_LEVEL_KEYS` and `logger` — the bindings look mechanically renamed;
# confirm against the upstream conversion script.
__lowercase = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """ctc_proj""",
    """mask_emb""": """masked_spec_embed""",
}
# Keys assigned at the top level of the HF model (not nested under `unispeech.`).
__lowercase = [
    """ctc_proj""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]
def _lowerCamelCase(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Copy a fairseq tensor *value* into the HF module attribute addressed by
    the dotted path *key*, after shape-checking it.

    *weight_type* selects which attribute of the resolved module receives the
    data ("weight", "weight_g", "weight_v", "bias", or None for the module
    itself).  Fine-tuned models drop pretraining-only layers and rename
    ``ctc_proj`` to ``lm_head``.  Note: the original signature declared all six
    parameters with the same name (a SyntaxError); names restored from use.
    """
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Walk the fairseq state dict and copy every tensor into `hf_model`.

    Conv feature-extractor weights are routed through `load_conv_layer`;
    everything else is matched against the module-level MAPPING table and
    written with `set_recursively`.  Unmatched names are logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Top-level keys live directly on the model; the rest sit under "unispeech."
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Substitute the encoder layer index for the wildcard.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor tensor into the HF feature extractor.

    `full_name` looks like "...conv_layers.<layer_id>.<type_id>.(weight|bias)":
    type_id 0 is the conv itself, type_id 2 is its layer norm (only layer 0
    has one when group norm is used).  Anything else goes to `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq UniSpeech checkpoint into the HF format.

    Builds the config (optionally from `config_path`), for fine-tuned models
    also builds tokenizer/feature-extractor from the fairseq dictionary,
    copies the weights over, and saves everything to
    `pytorch_dump_folder_path`.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Layer-norm feature extractors need an attention mask at inference time.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowercase = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 563 | 1 |
'''simple docstring'''
def topological_sort(graph):
    """Print a topological ordering of `graph` using Kahn's algorithm.

    `graph` is an adjacency list mapping each vertex 0..n-1 to its list of
    successors.  Prints the ordering, or "Cycle exists" when the graph is
    not a DAG.
    """
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with all sources (indegree 0).
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 18 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Decoder keys that are expected to be absent from the fairseq state dict
# (sinusoidal position embeddings are recomputed, not stored).
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Map a fairseq MusicGen decoder parameter name to its transformers equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict, hidden_size):
    """Rename all fairseq keys in `state_dict` and split it into two dicts.

    Returns `(state_dict, enc_dec_proj_state_dict)` where the second dict
    holds the encoder->decoder projection weights (loaded separately).
    Fused q/k/v projections are split into three tensors of `hidden_size`
    rows each.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint):
    """Return the MusicgenDecoderConfig matching a named checkpoint size.

    Raises ValueError for anything other than "small"/"medium"/"large".
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert an audiocraft MusicGen checkpoint to the HF composite model.

    Loads the fairseq model, renames/splits its decoder weights, assembles
    text encoder (T5) + audio encoder (EnCodec) + decoder, sanity-checks a
    forward pass, then optionally saves locally and/or pushes to the hub.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
_a : List[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy import structure: submodule name -> public symbols it provides.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    # Replace the module with a lazy proxy so sentencepiece loads on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list ):
    """Return the arithmetic mean of the numbers in ``A__``.

    Raises:
        ValueError: if ``A__`` is empty.
    """
    if not A__:
        raise ValueError("List is empty")
    total = 0
    for item in A__:
        total += item
    return total / len(A__)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 0 |
'''simple docstring'''
def solution(min_total: int = 10**12) -> int:
    """Project Euler 100: smallest count of blue discs in a box of more than
    `min_total` discs for which P(drawing two blue discs) is exactly 1/2.

    Valid (blue, total) pairs follow a Pell-like recurrence; here
    total = (numerator + 1) // 2 and blue = (denominator + 1) // 2.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    # Advance until the implied total disc count strictly exceeds min_total.
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
| 627 | from __future__ import annotations
# Sieve of Eratosthenes up to 1_000_000; seive[n] is True iff n was never
# marked composite (indices 0 and 1 are never cleared, matching the
# original behaviour of this script).
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Sieve lookup; only valid for 0 <= n <= 1_000_000."""
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """True if any decimal digit of n is even (such n > 2 cannot be circular primes)."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Return all circular primes up to `limit` (every digit rotation is prime)."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(rotation) for rotation in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Project Euler 35: number of circular primes below one million."""
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 10 | 0 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return every permutation of `arr` (as tuples) using Heap's iterative algorithm.

    The input list is permuted in place while generating; the returned list
    starts with the original ordering.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        # c[i] plays the role of the loop counter of the recursive formulation.
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
_A : List[str] =input('''Enter numbers separated by a comma:\n''').strip()
_A : Optional[int] =[int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 631 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    """HRRN scheduling: return the turn-around time of every process.

    Processes are reordered by arrival time; at each step the arrived,
    unfinished process with the highest response ratio
    (waiting + burst) / burst runs to completion (non-preemptive).
    NOTE: `arrival_time` is sorted in place.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first process that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1

        # Jump forward in time if nothing has arrived yet.
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]

            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    """Return per-process waiting time: turn-around time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
_A : List[str] =5
_A : Optional[Any] =['''A''', '''B''', '''C''', '''D''', '''E''']
_A : Optional[int] =[1, 2, 3, 4, 5]
_A : Dict =[1, 2, 3, 4, 5]
_A : Any =calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
_A : Optional[int] =calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(F'average waiting time : {mean(waiting_time):.5f}')
print(F'average turn around time : {mean(turn_around_time):.5f}')
| 631 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy import structure: submodule name -> public symbols it provides.
# Backend-specific entries are attached only when the backend is installed.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 531 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename one flax parameter key tuple and transpose its tensor for PyTorch.

    - 3D "kernel" tensors are expert layers: renamed to "weight" and permuted
      to (experts, out, in).
    - 2D "kernel" tensors are linear layers: renamed to "weight" and transposed.
    - "scale"/"embedding" leaves are renamed to "weight" unchanged.
    Returns the (possibly) updated (key_tuple, tensor) pair.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into (real layer name, leaf tuple, content).

    "metadata"/"kvstore" suffixes are peeled off to recover the layer name;
    "kvstore/path" entries are rewritten to absolute paths under
    `switch_checkpoint_path`, and "kvstore/driver" entries are forced to "file"
    so tensorstore reads from local disk.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Rename a block's keys to the HF convention and torch.save it to `save_path`.

    Keys are first run through `rename_keys`, then "/" separators are replaced
    with "." to match PyTorch state-dict naming.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Stream a t5x Switch Transformers checkpoint into sharded PyTorch files.

    Weights are read one tensor at a time via tensorstore, renamed, cast to
    `dtype`, and accumulated into blocks of at most `max_shard_size` bytes.
    Returns `(metadata, index)`; for a single shard the index is None.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    # Group the tensorstore spec entries by their real layer name.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        flax_key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(flax_key_tuple)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCAmelCase =parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """Smoke test: save/reload a small Switch Transformers checkpoint and run generation."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 617 | 0 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
# Pre-create the output directory layout used by unet()/value_function() below.
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    """Convert a diffuser temporal U-Net checkpoint (horizon `hor`, 32 or 128)
    to the diffusers UNet1D format and save weights + config under hub/."""
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Map old parameter names onto the new model's names (assumes identical ordering).
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    """Convert the hopper-medium-v2 value-function checkpoint to the
    diffusers UNet1D format and save weights + config under hub/."""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    # Map old parameter names onto the new model's names (assumes identical ordering).
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
| 709 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
_snake_case = logging.get_logger(__name__)
# NOTE(review): this rebinding clobbers the logger above — the two constants
# were presumably distinct (logger vs. pretrained config-archive map); confirm
# against the upstream file before relying on either binding.
_snake_case = {
    '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
    '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
    """Configuration holder for a MobileNetV1 model.

    Stores the hyper-parameters the model code reads as attributes; defaults
    correspond to the ``mobilenet_v1_1.0_224`` checkpoint.
    """

    # Model-type identifier used by the auto-config machinery.
    SCREAMING_SNAKE_CASE_ = "mobilenet_v1"

    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ):
        """Store the configuration values.

        Args:
            num_channels: number of input image channels.
            image_size: input resolution the model expects.
            depth_multiplier: width multiplier; must be strictly positive.
            min_depth: lower bound on per-layer channel counts.
            hidden_act: name of the activation function.
            tf_padding: whether to apply TensorFlow-style "SAME" padding.
            classifier_dropout_prob: dropout probability of the classifier head.
            initializer_range: stddev used for weight initialization.
            layer_norm_eps: epsilon for normalization layers.

        Raises:
            ValueError: if ``depth_multiplier`` is not greater than zero.
        """
        # (Original signature reused one mangled name for every parameter,
        # which is a SyntaxError; names restored from the body's reads.)
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
    '''ONNX export configuration for MobileNetV1.

    NOTE(review): this class reuses the name of the config class defined above
    and shadows it at module scope, and the three properties below all share
    one name so only the last survives on the class. The originals were
    presumably distinct (``inputs`` / ``outputs`` / ``atol_for_validation``) —
    confirm against the upstream file.
    '''
    # Torch/opset-era version this export configuration targets.
    SCREAMING_SNAKE_CASE_: Optional[int] = version.parse("1.11" )
    @property
    def __lowerCamelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
        """Input spec: one batched image tensor under the key ``pixel_values``."""
        return OrderedDict([('pixel_values', {0: 'batch'})] )
    @property
    def __lowerCamelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
        """Output spec: logits for classification, otherwise hidden/pooled states."""
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
    @property
    def __lowerCamelCase ( self : Optional[Any] ) -> float:
        """Absolute tolerance used when validating the exported model's outputs."""
        return 1E-4
| 491 | 0 |
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCamelCase_ ( ABC ):
    """Abstract base class for beam-search generation constraints.

    A subclass describes a condition on the generated sequence and is stepped
    token by token via ``update``. ``test`` sanity-checks a subclass by
    driving it to completion with its own suggestions.
    """

    def __init__( self):
        # Sanity-check the subclass implementation as soon as it is built.
        self.test()

    def test( self):
        """Drive the constraint to completion using its own ``advance`` output.

        Raises:
            Exception: if ``advance``/``does_advance``/``update`` disagree,
                the constraint never completes within 10000 steps, or
                ``remaining`` is non-zero after completion.
        """
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                # Exercise reset() once early on.
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.')
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 1_00_00:
                raise Exception('update() does not fulfill the constraint.')
        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.')

    @abstractmethod
    def advance( self):
        """Return a token id (or list of ids) that would make progress."""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")

    @abstractmethod
    def does_advance( self , token_id):
        """Return whether generating ``token_id`` would make progress."""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")

    @abstractmethod
    def update( self , token_id):
        """Consume ``token_id``; return ``(stepped, completed, reset)``."""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")

    @abstractmethod
    def reset( self):
        """Forget all progress made so far."""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")

    @abstractmethod
    def remaining( self):
        """Return how many steps are left until the constraint is fulfilled."""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")

    @abstractmethod
    def copy( self , stateful=False):
        """Return a copy; carry over current progress when ``stateful``."""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")
class UpperCamelCase_ ( UpperCamelCase_ ):  # base resolves to the Constraint ABC defined above
    """Constraint requiring an exact token sequence to appear in the output."""

    def __init__( self , token_ids):
        super().__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # index of the most recently matched token
        self.completed = False

    def advance( self):
        """Return the next required token id, or None once complete."""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance( self , token_id):
        if not isinstance(token_id, int):
            raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}""")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update( self , token_id):
        """Consume one token; return ``(stepped, completed, reset)``."""
        if not isinstance(token_id, int):
            raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}""")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset( self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining( self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy( self , stateful=False):
        # NOTE(review): `PhrasalConstraint` is not bound under that name in this
        # module (classes were renamed) — confirm the intended constructor.
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class UpperCamelCase_ :
    """Prefix trie over several token-id sequences (disjunctive constraints)."""

    def __init__( self , nested_token_ids , no_subsets=True):
        """Build the trie.

        Args:
            nested_token_ids: list of token-id lists.
            no_subsets: when True, reject inputs where one sequence is a
                complete subset (prefix) of another, which would be ambiguous.

        Raises:
            ValueError: if ``no_subsets`` and a subset relationship exists.
        """
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        # Subset detection: if every sequence ends at a leaf, the number of
        # leaves equals the number of sequences.
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
                F""" {nested_token_ids}.""")
        self.trie = root

    def next_tokens( self , current_seq):
        """Return the token ids that may follow ``current_seq`` in the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf( self , current_seq):
        """Return True once ``current_seq`` matches one full stored sequence."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves( self , root):
        """Recursively count leaf nodes under ``root``."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets( self , trie , nested_token_ids):
        """True when some sequence is a subset of another (leaves < sequences)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class UpperCamelCase_ ( UpperCamelCase_ ):  # base resolves to the Constraint ABC defined above
    """Constraint fulfilled once ANY of several token sequences is generated."""

    def __init__( self , nested_token_ids):
        super().__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""")
        # NOTE(review): `DisjunctiveTrie` is not bound under that name in this
        # module (the trie class above was renamed) — confirm the intended target.
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance( self):
        """Return the allowed next token ids, or None when none remain."""
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance( self , token_id):
        if not isinstance(token_id, int):
            raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}""")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update( self , token_id):
        """Consume one token; return ``(stepped, completed, reset)``."""
        if not isinstance(token_id, int):
            raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}""")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset( self):
        self.completed = False
        self.current_seq = []

    def remaining( self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy( self , stateful=False):
        # NOTE(review): `DisjunctiveConstraint` is not bound under that name in
        # this module (classes were renamed) — confirm the intended constructor.
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class UpperCamelCase_ :
    """Tracks a beam hypothesis' progress through a list of constraints.

    Constraints live in three buckets: ``complete_constraints`` (fulfilled),
    ``inprogress_constraint`` (at most one, partially fulfilled), and
    ``pending_constraints`` (untouched).
    """

    def __init__( self , constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state( self):
        """Reset to the 'nothing fulfilled yet' state."""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank( self):
        """Score the amount of constraint progress (used to rank beams)."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance( self):
        """Return token id(s) that would advance some constraint, or None."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset( self , token_ids):
        """Rebuild constraint state from scratch given already-generated tokens."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add( self , token_id):
        """Feed one generated token; return ``(complete, stepped)``."""
        if not isinstance(token_id, int):
            raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            '`constraint.update(token_id)` is not yielding incremental progress, '
                            'even though `constraint.does_advance(token_id)` is true.')
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy( self , stateful=True):
        # NOTE(review): `ConstraintListState` is not bound under that name in
        # this module (the class was renamed) — confirm the intended constructor.
        # We never mutate self.constraints, so the copy starts from the same list.
        new_state = ConstraintListState(self.constraints)
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 410 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase ( TestCase):
    """Static checks over dataset scripts under ./datasets: every ``open(...)``
    must pass an explicit mode/encoding, and no ``print`` calls are allowed."""

    def _no_encoding_on_file_open( self , filepath):
        r"""Return a regex match if ``filepath`` contains an ``open(...)`` call
        without an explicit mode/encoding argument, else None."""
        with open(filepath , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match

    def _no_print_statements( self , filepath):
        """Return a match for an uncommented, non-docstring ``print(`` call in
        ``filepath``, or None if there is none."""
        with open(filepath , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open( self):
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )

    def test_no_print_statements( self):
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 404 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class UpperCAmelCase__ ( __snake_case ):
    """Configuration for the multilingual M-CLIP text tower."""

    # Model-type identifier.
    __snake_case = "M-CLIP"

    def __init__( self , transformerDimSize=1024 , imageDimSize=768 , **kwargs ):
        """Store dimensions read by the model's projection head.

        Args:
            transformerDimSize: hidden size of the XLM-R text transformer.
            imageDimSize: dimensionality of the shared image/text space.
        """
        # (Original signature reused one mangled name for every parameter —
        # a SyntaxError; names restored from the attributes read below.)
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class UpperCAmelCase__ ( __snake_case ):
    """XLM-R text encoder plus a linear projection into the CLIP image space."""

    # NOTE(review): `MCLIPConfig` is not bound under that name in this module
    # (the config class above was renamed) — confirm the intended target.
    __snake_case = MCLIPConfig

    def __init__( self , config , *args , **kwargs ):
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        # Projects mean-pooled text embeddings into the image-embedding space.
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )

    def forward( self , input_ids , attention_mask ):
        """Return ``(projected pooled embedding, per-token embeddings)``."""
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        # Mean-pool over tokens, weighted by the attention mask.
        embs_pooled = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs_pooled ), embs
| 711 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def a__ (__lowercase :Optional[int] ) -> Any:
    """With the mock-fs fixture active, 'mock' and 'bz2' are both registered."""
    # NOTE(review): the unused parameter is presumably a pytest fixture whose
    # side effect registers the mock filesystem — confirm the fixture name.
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def a__ () -> Tuple:
    """Without the fixture, 'mock' is gone while builtin 'bz2' remains."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def a__ () -> List[Any]:
    """`extract_path_from_uri` strips the s3 scheme and leaves local paths alone."""
    mock_bucket = '''mock-s3-bucket'''
    dataset_path = f"""s3://{mock_bucket}"""
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith('''s3://''' ) is False
    new_dataset_path = '''./local/path'''
    dataset_path = extract_path_from_uri(new_dataset_path )
    # A plain local path must pass through unchanged.
    assert dataset_path == new_dataset_path
def a__ (__lowercase :Tuple ) -> Optional[int]:
    """`is_remote_filesystem` is True for the remote fixture fs, False for local."""
    is_remote = is_remote_filesystem(__lowercase )
    assert is_remote is True
    fs = fsspec.filesystem('''file''' )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , COMPRESSION_FILESYSTEMS )
def a__ (compression_fs_class , gz_file , bza_file , lza_file , zstd_file , xz_file , text_file ) -> Any:
    """Each compression filesystem exposes the archived file and decompresses
    it to the original text (skipping protocols whose codec is unavailable)."""
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Fixture is None when the optional codec isn't installed — skip.
        reason = f"""for '{compression_fs_class.protocol}' compression protocol, """
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    fs = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
    assert isinstance(fs , compression_fs_class )
    # The exposed member is the archive name minus its compression suffix.
    expected_filename = os.path.basename(input_path )
    expected_filename = expected_filename[: expected_filename.rindex('''.''' )]
    assert fs.glob('''*''' ) == [expected_filename]
    with fs.open(expected_filename , '''r''' , encoding='''utf-8''' ) as f, open(text_file , encoding='''utf-8''' ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def a__ (protocol , zip_jsonl_path , jsonl_gz_path ) -> Optional[int]:
    """A member file inside a zip/gzip archive is visible via chained fsspec URLs."""
    compressed_file_paths = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = '''dataset.jsonl'''
    path = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
    fs, *_ = fsspec.get_fs_token_paths(path )
    assert fs.isfile(member_file_path )
    assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def a__ (hf_token , hf_api , hf_private_dataset_repo_txt_data , text_file ) -> Optional[Any]:
    """HfFileSystem lists and reads files of a (private) hub dataset repo."""
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
    assert hffs.isdir('''data''' )
    assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
    with open(text_file ) as f:
        assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def a__ () -> Optional[Any]:
    """Re-registering an existing protocol triggers exactly one overwrite
    warning when datasets.filesystems is reloaded."""
    protocol = '''bz2'''
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
    )
| 332 | 0 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# SHA of the merge-base between main and HEAD: the fork point of this branch.
fork_point_sha = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
# Files changed since the fork point; --diff-filter=d excludes deleted files.
modified_files = (
    subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
# Top-level directories of interest are passed as CLI arguments.
joined_dirs = """|""".join(sys.argv[1:])
regex = re.compile(RF'''^({joined_dirs}).*?\.py$''')
# Keep only the modified .py files that live under the requested directories.
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed directly by Makefile commands.
print(""" """.join(relevant_modified_files), end="""""")
| 93 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
    """Builds small RegNet configs and dummy inputs for the model tests below."""

    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`; read by test_hidden_states_output.
        self.num_stages = len(depths )

    def prepare_config_and_inputs( self):
        """Return (config, pixel_values, labels) for a tiny forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self):
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    def create_and_check_model( self , config , pixel_values , labels):
        model = RegNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin,PipelineTesterMixin,unittest.TestCase ):
    """Model-level tests for RegNet (no attentions, pruning, or embedding APIs)."""

    # Class attributes read by ModelTesterMixin / PipelineTesterMixin.
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp( self):
        self.model_tester = RegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )

    def test_config( self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self):
        # RegNet's config has no text-modality common properties to check.
        return

    @unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def test_inputs_embeds( self):
        pass

    @unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def test_model_common_attributes( self):
        pass

    def test_forward_signature( self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_initialization( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                # Original said `nn.BatchNormad` — a mangled `nn.BatchNorm2d`.
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )

    def test_hidden_states_output( self):
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
    """Load the standard COCO two-cats fixture image used by the integration test."""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


# Backward-compatible alias: the integration test calls `prepare_img()`.
prepare_img = lowerCAmelCase_
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
    """End-to-end check: the pretrained RegNet classifies the COCO cats image."""

    @cached_property
    def default_image_processor( self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head( self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 307 | 0 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# Module logger; verbosity raised to INFO so conversion progress is visible.
lowerCamelCase =logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy weights from an old-structure ProphetNet checkpoint into the new layout.

    Loads the checkpoint with both the old and the new model classes, then walks
    every key the new class reports as missing and copies the corresponding
    tensor from the old model, translating attribute names via ``mapping``.

    Fixes over the rewritten original: the two parameters were both named
    ``UpperCamelCase__`` (a SyntaxError), the name did not match the call site
    (``convert_prophetnet_checkpoint_to_pytorch``), and two shape checks were
    bare comparison expressions with no effect — they are real asserts now.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections that the old model stores fused as in_proj_*.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old model stores q/k/v fused; slice out the matching third.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # Fixed: these two were bare comparisons with no effect.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level on both models and continue with the next
            # attribute in the dotted key.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point. The rewrite bound both the parser and the parse result
    # to `lowerCamelCase` (clobbering the parser) and then referenced an
    # undefined `args`; use distinct, conventional names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 462 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import table for the Swin model package. The rewrite bound everything to
# one name (`lowerCamelCase`, so each assignment clobbered the previous one) and
# referenced an undefined `_import_structure` at the bottom; restore the
# standard transformers lazy-module pattern.
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 462 | 1 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    """Parse command-line arguments for the complexity-classification fine-tuning script.

    Renamed from the rewritten ``A`` so the call in ``main`` (``get_args()``)
    resolves; the placeholder ``type=SCREAMING_SNAKE_CASE_`` values are replaced
    with the concrete argparse types.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    # NOTE: argparse's `type=bool` treats any non-empty string as True; kept
    # for backward compatibility with the original CLI.
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
UpperCAmelCase__ : Optional[Any] = load("accuracy")
def compute_metrics(eval_pred):
    """Compute accuracy from a Trainer ``(logits, labels)`` eval-prediction pair.

    Renamed from the rewritten ``A`` to the name the ``Trainer`` is wired with
    (``compute_metrics=...`` in ``main``).
    """
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    """Trainer callback that also evaluates on the *training* set after each eval.

    Restored from the rewritten ``class A (lowercase__)``: the base
    ``lowercase__`` was undefined, the hook name must be ``on_evaluate`` for the
    Trainer to call it, and the instantiation site uses ``CustomCallback(trainer)``.
    """

    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_evaluate(self, args, state, control, **kwargs):
        """After each evaluation, run a second evaluation on the train split."""
        if control.should_evaluate:
            # Copy `control` first: the nested evaluate() call mutates it.
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    """Fine-tune a sequence classifier on codeparrot/codecomplex (7 complexity classes).

    Renamed from the rewritten ``A`` so the ``main()`` call in the
    ``__main__`` guard resolves; placeholder references are replaced with the
    real local names, and the ``labels.straint`` typo is fixed to
    ``labels.str2int``.
    """
    args = get_args()
    set_seed(args.seed)

    # 60/20/20 train/test/valid split of the single published "train" split.
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        # Freeze the encoder; only the classification head is trained.
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        # Tokenize the source code and map the complexity string to a class id.
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
| 48 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _snake_case(unittest.TestCase):
    """Unit tests for diffusers' ``is_safetensors_compatible`` helper.

    Restored from the rewritten original, where every method was named ``A__``
    (later defs shadowed earlier ones, so only one test survived) and each
    method referenced an undefined ``__lowercase`` instead of its local
    ``filenames``/``variant`` variables.
    """

    def test_all_is_compatible(self):
        """All components have a safetensors counterpart -> compatible."""
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        """A .bin without its safetensors counterpart -> not compatible."""
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformers_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        """fp16-variant filenames are matched when `variant` is given."""
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformers_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 413 | 0 |
from __future__ import annotations
from PIL import Image
# Define glider example
# Define glider example (used by the __main__ demo below, which references
# the name GLIDER; the rewrite bound both grids to the same `lowerCAmelCase`,
# so the blinker clobbered the glider).
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute the next Game of Life generation for a 2D grid of 0/1 cells.

    Renamed from the rewritten ``_UpperCAmelCase`` so the call in
    ``generate_images`` (``new_generation(...)``) resolves; the second
    ``_UpperCAmelCase`` def below would otherwise also shadow this one.
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Count the live neighbours of cell (i, j), guarding the grid edges.
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die; all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render `frames` successive generations as grayscale PIL images.

    Fixes over the rewritten original: both parameters shared one name
    (a SyntaxError), the pixel write was discarded into a placeholder variable,
    and the grid was never advanced into `cells`.
    """
    images = []
    for _ in range(frames):
        # Create output image (one pixel per cell).
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image: live cells (1) render black, dead cells white.
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image, then advance the simulation one step.
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    # Render 16 generations of the glider and save them as an animated GIF.
    # (The rewrite bound the frames to `lowerCAmelCase` and then used an
    # undefined `images`.)
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 703 |
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR), else 0.

    The rewritten original declared two parameters with the same name
    (a SyntaxError) and compared one input against itself; the truth-table
    printer below calls this as ``nor_gate``.
    """
    return int(input_1 == input_2 == 0)
def main() -> None:
    """Print the full truth table of the NOR gate.

    Renamed from the rewritten ``_UpperCAmelCase`` (which also shadowed the
    gate function of the same name) so the ``main()`` call in the
    ``__main__`` guard resolves.
    """
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
    # Run any embedded doctests, then print the truth table via main().
    import doctest

    doctest.testmod()
    main()
'''simple docstring'''
def hamming(n_element: int) -> list:
    """Return the first ``n_element`` Hamming numbers (of the form 2^i * 3^j * 5^k).

    Renamed from the rewritten ``__snake_case`` so the interactive driver
    below (``hamming(int(n))``) resolves.

    Raises:
        ValueError: if ``n_element`` is less than 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    # i, j, k index the smallest elements whose *2, *3, *5 products have not
    # yet been emitted.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Interactive driver: read n from stdin and print the first n Hamming
    # numbers. The rewrite bound the input and the result to `snake_case_`
    # and then referenced undefined `n` / `hamming_numbers`.
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 212 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowercase__ ( snake_case_, unittest.TestCase ):
    """Tokenization test suite for RoCBert (BERT variant with word-shape and
    word-pronunciation ids).

    NOTE(review): this class looks machine-rewritten — every test method is
    named ``UpperCAmelCase`` (later defs shadow earlier ones, so unittest only
    sees the last one), the five class attributes below all assign to
    ``_snake_case`` (only the last assignment survives), and many call
    arguments are the ``lowerCamelCase__`` placeholder rather than the local
    variables. Compare against the upstream RoCBert tokenization tests before
    relying on any behavior documented here.
    """

    _snake_case = RoCBertTokenizer
    _snake_case = None
    _snake_case = False
    _snake_case = True
    _snake_case = filter_non_english

    def UpperCAmelCase ( self ):
        """Write a tiny vocab plus word-shape / word-pronunciation JSON files to tmpdir."""
        super().setUp()
        UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
        UpperCamelCase = {}
        UpperCamelCase = {}
        for i, value in enumerate(lowerCamelCase__ ):
            UpperCamelCase = i
            UpperCamelCase = i
        UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
        UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
            json.dump(lowerCamelCase__ , lowerCamelCase__ , ensure_ascii=lowerCamelCase__ )
        with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
            json.dump(lowerCamelCase__ , lowerCamelCase__ , ensure_ascii=lowerCamelCase__ )

    def UpperCAmelCase ( self ):
        """Tokenize Chinese text and check token / shape-id / pronunciation-id conversion."""
        UpperCamelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        UpperCamelCase = tokenizer.tokenize('''你好[SEP]你是谁''' )
        self.assertListEqual(lowerCamelCase__ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowerCamelCase__ ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase__ ) , [5, 6, 2, 5, 7, 8] )

    def UpperCAmelCase ( self ):
        """Default basic tokenizer splits CJK characters into separate tokens."""
        UpperCamelCase = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )

    def UpperCAmelCase ( self ):
        """Basic tokenizer behavior with the placeholder do_lower_case flag (lower-casing expected)."""
        UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def UpperCAmelCase ( self ):
        """Lower-casing with accents preserved (strip_accents placeholder)."""
        UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )

    def UpperCAmelCase ( self ):
        """Lower-casing with accents stripped (strip_accents placeholder)."""
        UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def UpperCAmelCase ( self ):
        """Lower-casing default accent handling (strip_accents unset)."""
        UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def UpperCAmelCase ( self ):
        """Case-preserving basic tokenization (do_lower_case placeholder, False expected)."""
        UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def UpperCAmelCase ( self ):
        """Case-preserving tokenization with accents preserved."""
        UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def UpperCAmelCase ( self ):
        """Case-preserving tokenization with accents stripped."""
        UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def UpperCAmelCase ( self ):
        """`never_split` tokens must be kept intact by the basic tokenizer."""
        UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase__ , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )

    def UpperCAmelCase ( self ):
        """WordPiece tokenizer: greedy longest-match-first with '##' continuations."""
        UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        UpperCamelCase = {}
        for i, token in enumerate(lowerCamelCase__ ):
            UpperCamelCase = i
        UpperCamelCase = RoCBertWordpieceTokenizer(vocab=lowerCamelCase__ , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )

    def UpperCAmelCase ( self ):
        """`_is_whitespace` classifies space-like characters (incl. NBSP)."""
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )
        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )

    def UpperCAmelCase ( self ):
        """`_is_control` detects control characters but not whitespace/letters."""
        self.assertTrue(_is_control('''\u0005''' ) )
        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )

    def UpperCAmelCase ( self ):
        """`_is_punctuation` detects ASCII punctuation but not letters/spaces."""
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )
        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )

    def UpperCAmelCase ( self ):
        """Soft-hyphen-only input tokenizes to nothing (tokenizers issue #340)."""
        UpperCamelCase = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(lowerCamelCase__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        if self.test_rust_tokenizer:
            UpperCamelCase = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(lowerCamelCase__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )

    def UpperCAmelCase ( self ):
        """Offset mapping from the fast tokenizer matches the expected spans,
        with and without lower-casing."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
                UpperCamelCase = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                UpperCamelCase = tokenizer_r.encode_plus(
                    lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , )
                UpperCamelCase = tokenizer_r.do_lower_case if hasattr(lowerCamelCase__ , '''do_lower_case''' ) else False
                UpperCamelCase = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''A'''),
                        ((1, 2), ''','''),
                        ((3, 5), '''na'''),
                        ((5, 6), '''##ï'''),
                        ((6, 8), '''##ve'''),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), '''Allen'''),
                        ((2_1, 2_3), '''##NL'''),
                        ((2_3, 2_4), '''##P'''),
                        ((2_5, 3_3), '''sentence'''),
                        ((3_3, 3_4), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''a'''),
                        ((1, 2), ''','''),
                        ((3, 8), '''naive'''),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), '''allen'''),
                        ((2_1, 2_3), '''##nl'''),
                        ((2_3, 2_4), '''##p'''),
                        ((2_5, 3_3), '''sentence'''),
                        ((3_3, 3_4), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )

    def UpperCAmelCase ( self ):
        """Chinese characters: each character is its own token; '##' prefixes only
        appear when tokenize_chinese_chars is disabled."""
        UpperCamelCase = ['''的''', '''人''', '''有''']
        UpperCamelCase = ''''''.join(lowerCamelCase__ )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                UpperCamelCase = True
                UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
                UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
                UpperCamelCase = tokenizer_p.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
                UpperCamelCase = tokenizer_r.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
                UpperCamelCase = tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ )
                UpperCamelCase = tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
                self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
                UpperCamelCase = False
                UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
                UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
                UpperCamelCase = tokenizer_r.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
                UpperCamelCase = tokenizer_p.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
                UpperCamelCase = tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ )
                UpperCamelCase = tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ )
                # it is expected that only the first Chinese character is not preceded by "##".
                UpperCamelCase = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(lowerCamelCase__ )
                ]
                self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
                self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )

    @slow
    def UpperCAmelCase ( self ):
        """`build_inputs_with_special_tokens` wraps with [CLS]=1 / [SEP]=2 ids."""
        UpperCamelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        UpperCamelCase = tokenizer.encode('''你好''' , add_special_tokens=lowerCamelCase__ )
        UpperCamelCase = tokenizer.encode('''你是谁''' , add_special_tokens=lowerCamelCase__ )
        UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
        UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]

    def UpperCAmelCase ( self ):
        """`prepare_for_model` on (ids, shape ids, pronunciation ids) must agree
        with `encode_plus` on the raw text."""
        UpperCamelCase = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                UpperCamelCase = '''你好,你是谁'''
                UpperCamelCase = tokenizer.tokenize(lowerCamelCase__ )
                UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
                UpperCamelCase = tokenizer.convert_tokens_to_shape_ids(lowerCamelCase__ )
                UpperCamelCase = tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase__ )
                UpperCamelCase = tokenizer.prepare_for_model(
                    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
                UpperCamelCase = tokenizer.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
                self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
| 212 | 1 |
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    """Compute the Knuth-Morris-Pratt prefix function (failure table).

    prefix_result[i] is the length of the longest proper prefix of
    input_string[: i + 1] that is also a suffix of it. Runs in O(n).

    Restored from obfuscated source whose locals (``prefix_result``, ``j``)
    and parameter (``input_string``) had been collapsed into one name.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Return the length of the longest prefix that occurs again as a substring.

    ``default=0`` makes the empty string return 0 instead of raising ValueError.
    """
    return max(prefix_function(input_string), default=0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 716 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def A_(vl, wt, w, n):
    """Greedy fractional knapsack.

    Args:
        vl: item values.
        wt: item weights (parallel to ``vl``).
        w: knapsack capacity.
        n: number of items.

    Returns:
        Maximum attainable value: whole items in decreasing value/weight order,
        plus a fraction of the first item that no longer fits.

    Restored from obfuscated source: the four parameters shared one name (a
    SyntaxError) and the locals ``r``/``acc``/``k`` were collapsed together.
    """
    # Sort (value, weight) pairs by value density, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # prefix sums of weights
    k = bisect(acc, w)  # how many whole items fit
    # NOTE(review): k == 0 returns 0 even though a fraction of the densest item
    # could fit — preserved as-is from the upstream algorithm.
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 310 | 0 |
'''simple docstring'''
def actual_power(a: int, b: int):
    """Return a**abs(b) via exponentiation by squaring.

    ``int(b / 2)`` truncates toward zero, so the recursion also terminates for
    negative ``b`` and yields a**abs(b) in that case.

    Restored from obfuscated source (duplicate parameter names were a
    SyntaxError and both helpers shared one function name); also computes the
    half-power once per level instead of twice, turning the original
    exponential call tree into O(log |b|) calls.
    """
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int):
    """Return a**b for integer b, supporting negative exponents.

    For b < 0, actual_power(a, b) equals a**abs(b), so the reciprocal gives
    the correct result. Raises ZeroDivisionError for a == 0 with b < 0.
    """
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))  # expected: -0.125
| 274 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
    """Model tester for TF LayoutLMv3: builds a tiny LayoutLMvaConfig plus random
    text ids / bounding boxes / pixel values and shape-checks each task head.

    NOTE(review): obfuscation damage — ``__init__`` declares every parameter as
    ``a`` (duplicate argument names are a SyntaxError) and binds results to a
    single local ``__lowerCamelCase`` instead of the ``self.*`` attributes the
    other methods read (``self.batch_size`` etc.). TODO restore names from the
    upstream TF LayoutLMv3 test file.
    """

    # NOTE(review): parameters should be parent, batch_size=2, num_channels=3, ... — all mangled to `a`.
    def __init__( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any]=2 , a: str=3 , a: Any=4 , a: Union[str, Any]=2 , a: Tuple=7 , a: int=True , a: Tuple=True , a: List[str]=True , a: Union[str, Any]=True , a: str=99 , a: Tuple=36 , a: int=2 , a: Dict=4 , a: Union[str, Any]=37 , a: List[str]="gelu" , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Dict=512 , a: Union[str, Any]=16 , a: str=2 , a: int=0.0_2 , a: Optional[Any]=6 , a: Optional[int]=6 , a: Dict=3 , a: Optional[Any]=4 , a: Optional[Any]=None , a: Dict=1000 , ):
        # NOTE(review): each line below originally assigned a distinct self.* attribute.
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : Optional[Any] = batch_size
        __lowerCamelCase : Optional[int] = num_channels
        __lowerCamelCase : str = image_size
        __lowerCamelCase : int = patch_size
        __lowerCamelCase : List[str] = is_training
        __lowerCamelCase : Dict = use_input_mask
        __lowerCamelCase : Any = use_token_type_ids
        __lowerCamelCase : List[str] = use_labels
        __lowerCamelCase : str = vocab_size
        __lowerCamelCase : List[Any] = hidden_size
        __lowerCamelCase : List[Any] = num_hidden_layers
        __lowerCamelCase : Any = num_attention_heads
        __lowerCamelCase : List[Any] = intermediate_size
        __lowerCamelCase : List[Any] = hidden_act
        __lowerCamelCase : Any = hidden_dropout_prob
        __lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        __lowerCamelCase : Dict = max_position_embeddings
        __lowerCamelCase : Tuple = type_vocab_size
        __lowerCamelCase : int = type_sequence_label_size
        __lowerCamelCase : List[str] = initializer_range
        __lowerCamelCase : List[str] = coordinate_size
        __lowerCamelCase : int = shape_size
        __lowerCamelCase : Union[str, Any] = num_labels
        __lowerCamelCase : int = num_choices
        __lowerCamelCase : int = scope
        __lowerCamelCase : Any = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __lowerCamelCase : Any = text_seq_length
        __lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1
        __lowerCamelCase : Any = self.text_seq_length + self.image_seq_length

    # Build a config and random inputs (ids, legal bboxes, pixel values, masks, labels).
    def _snake_case ( self: List[str] ):
        __lowerCamelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        __lowerCamelCase : int = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap coordinates so that x1 <= x2 and y1 <= y2 in every box.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    __lowerCamelCase : List[str] = bbox[i, j, 3]
                    __lowerCamelCase : str = bbox[i, j, 1]
                    __lowerCamelCase : Dict = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    __lowerCamelCase : Tuple = bbox[i, j, 2]
                    __lowerCamelCase : Any = bbox[i, j, 0]
                    __lowerCamelCase : List[str] = tmp_coordinate
        __lowerCamelCase : str = tf.constant(a )
        __lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowerCamelCase : Any = None
        if self.use_input_mask:
            __lowerCamelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
        __lowerCamelCase : Tuple = None
        if self.use_token_type_ids:
            __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            __lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        __lowerCamelCase : Dict = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    # create_and_check_model: run the base model on text+image, text-only, image-only.
    def _snake_case ( self: Tuple , a: List[Any] , a: Any , a: List[str] , a: Dict , a: Optional[Any] , a: Dict ):
        __lowerCamelCase : Optional[Any] = TFLayoutLMvaModel(config=a )
        # text + image
        __lowerCamelCase : Optional[Any] = model(a , pixel_values=a , training=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , )
        __lowerCamelCase : List[Any] = model(a , bbox=a , pixel_values=a , training=a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        __lowerCamelCase : List[Any] = model(a , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        __lowerCamelCase : Optional[Any] = model({'pixel_values': pixel_values} , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    # Sequence-classification head: logits must be (batch_size, num_labels).
    def _snake_case ( self: Dict , a: Dict , a: Optional[Any] , a: int , a: Optional[int] , a: List[str] , a: List[str] , a: List[str] ):
        __lowerCamelCase : List[str] = self.num_labels
        __lowerCamelCase : str = TFLayoutLMvaForSequenceClassification(config=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Token-classification head: logits must be (batch_size, text_seq_length, num_labels).
    def _snake_case ( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Tuple , a: Optional[Any] , a: List[Any] ):
        __lowerCamelCase : Union[str, Any] = self.num_labels
        __lowerCamelCase : Any = TFLayoutLMvaForTokenClassification(config=a )
        __lowerCamelCase : Optional[Any] = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    # Question-answering head: start/end logits must be (batch_size, seq_length).
    def _snake_case ( self: Dict , a: Optional[Any] , a: str , a: Dict , a: Union[str, Any] , a: List[Any] , a: Optional[int] , a: List[str] ):
        __lowerCamelCase : List[Any] = 2
        __lowerCamelCase : Any = TFLayoutLMvaForQuestionAnswering(config=a )
        __lowerCamelCase : Any = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # prepare_config_and_inputs_for_common: repackage the inputs as a kwargs dict.
    def _snake_case ( self: List[Any] ):
        __lowerCamelCase : str = self.prepare_config_and_inputs()
        ((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = config_and_inputs
        __lowerCamelCase : Tuple = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """Common TF model tests for LayoutLMv3 (base model + QA / sequence / token
    classification heads), including hand-rolled loss-computation checks.

    NOTE(review): obfuscation damage — the base classes were originally
    TFModelTesterMixin and PipelineTesterMixin (``__UpperCamelCase`` is
    undefined); the class attributes below all share the name ``__snake_case``
    (originally all_model_classes, pipeline_model_mapping, test_pruning,
    test_resize_embeddings, test_onnx) so the later assignments clobber the
    earlier ones; and method parameters were collapsed to ``a`` (duplicate
    argument names are a SyntaxError). TODO restore from upstream.
    """

    # Originally: all_model_classes
    __snake_case = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Originally: pipeline_model_mapping
    __snake_case = (
        {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    # Originally: test_pruning / test_resize_embeddings / test_onnx flags
    __snake_case = False
    __snake_case = False
    __snake_case = False

    # is_pipeline_test_to_skip: every pipeline test is skipped for this model.
    def _snake_case ( self: int , a: List[str] , a: Any , a: Optional[Any] , a: Tuple , a: Tuple ):
        return True

    # _prepare_for_class: tile inputs for multiple-choice models and, when
    # return_labels is set, add dummy labels of the right shape per task head.
    def _snake_case ( self: str , a: Any , a: Any , a: Optional[int]=False ):
        __lowerCamelCase : List[str] = copy.deepcopy(a )
        if model_class in get_values(a ):
            __lowerCamelCase : Tuple = {
                k: tf.tile(tf.expand_dims(a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(a , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(a ):
                __lowerCamelCase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Dict = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict

    # setUp: build the model tester and the config tester.
    def _snake_case ( self: Tuple ):
        __lowerCamelCase : int = TFLayoutLMvaModelTester(self )
        __lowerCamelCase : str = ConfigTester(self , config_class=a , hidden_size=37 )

    # test_config
    def _snake_case ( self: Union[str, Any] ):
        self.config_tester.run_common_tests()

    # test_loss_computation: the loss must have the label's leading shape (or [1])
    # whether inputs are passed as kwargs, a masked tensor, a dict, or a tuple.
    def _snake_case ( self: Union[str, Any] ):
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = model_class(a )
            if getattr(a , 'hf_compute_loss' , a ):
                # The number of elements in the loss should be the same as the number of elements in the label
                __lowerCamelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : int = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a )[0]
                ]
                __lowerCamelCase : Dict = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                __lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : Dict = prepared_for_class.pop('input_ids' )
                __lowerCamelCase : str = model(a , **a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                __lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : List[str] = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    __lowerCamelCase : int = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        # -100 is the ignore index for the cross-entropy loss.
                        __lowerCamelCase : Tuple = -100
                        __lowerCamelCase : Tuple = tf.convert_to_tensor(a )
                        __lowerCamelCase : Tuple = model(a , **a )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                __lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : str = model(a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                __lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                # Get keys that were added with the _prepare_for_class function
                __lowerCamelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
                __lowerCamelCase : List[Any] = inspect.signature(model.call ).parameters
                __lowerCamelCase : List[str] = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                __lowerCamelCase : Optional[int] = {0: 'input_ids'}
                for label_key in label_keys:
                    __lowerCamelCase : Dict = signature_names.index(a )
                    __lowerCamelCase : str = label_key
                __lowerCamelCase : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                __lowerCamelCase : Optional[int] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    __lowerCamelCase : Optional[int] = prepared_for_class[value]
                __lowerCamelCase : Any = tuple(a )
                # Send to model
                __lowerCamelCase : int = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    # test_model
    def _snake_case ( self: List[str] ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(a , a , a , a , a , a )

    # test_model_various_embeddings: rerun the model check per position-embedding type.
    def _snake_case ( self: int ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : str = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowerCamelCase : Union[str, Any] = type
            self.model_tester.create_and_check_model(a , a , a , a , a , a )

    # test_for_sequence_classification
    def _snake_case ( self: Dict ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            a , a , a , a , a , a , a )

    # test_for_token_classification
    def _snake_case ( self: str ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            a , a , a , a , a , a , a )

    # test_for_question_answering
    def _snake_case ( self: str ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            a , a , a , a , a , a , a )

    # test_model_from_pretrained: loading the first published checkpoint must succeed.
    @slow
    def _snake_case ( self: int ):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(a )
            self.assertIsNotNone(a )
def prepare_img():
    """Load the COCO cats fixture image used by the integration test below.

    Restored from obfuscated source: the result was bound to a mangled local
    while ``return image`` referenced the original name (NameError), and the
    integration test calls this helper as ``prepare_img()``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration test: run microsoft/layoutlmv3-base on a fixture image
    and verify the output shape and a slice of the hidden states.

    NOTE(review): obfuscation damage — locals are collapsed to
    ``__lowerCamelCase`` and several call sites reference an undefined ``a``
    (originally the just-computed tensors / expected values); the cached
    property below was originally named ``default_image_processor``, which the
    test reads via ``self.default_image_processor``. TODO restore.
    """

    # Originally: default_image_processor (apply_ocr=False — obscured to `a`).
    @cached_property
    def _snake_case ( self: Optional[int] ):
        return LayoutLMvaImageProcessor(apply_ocr=a ) if is_vision_available() else None

    # test_inference_no_head
    @slow
    def _snake_case ( self: Optional[Any] ):
        __lowerCamelCase : Tuple = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
        __lowerCamelCase : Union[str, Any] = self.default_image_processor
        __lowerCamelCase : List[Any] = prepare_img()
        __lowerCamelCase : str = image_processor(images=a , return_tensors='tf' ).pixel_values
        __lowerCamelCase : Union[str, Any] = tf.constant([[1, 2]] )
        # Two tokens, one bbox per token, with a leading batch dimension.
        __lowerCamelCase : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        __lowerCamelCase : int = model(input_ids=a , bbox=a , pixel_values=a , training=a )
        # verify the logits
        __lowerCamelCase : Optional[int] = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , a )
        __lowerCamelCase : Any = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1e-4 ) )
| 669 | 0 |
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """Return True if *num* reads the same forwards and backwards."""
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    """Return *num* plus its digit-reversal (one reverse-and-add step)."""
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 1_0_0_0_0) -> int:
    """Count Lychrel candidates below *limit* (Project Euler 55).

    A number is counted when 50 reverse-and-add iterations never produce a
    palindrome. Restored from obfuscated source in which all three functions
    shared the name ``A``, leaving ``sum_reverse``/``is_palindrome`` undefined.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a_sum = num
        while iterations < 5_0:
            a_sum = sum_reverse(a_sum)
            iterations += 1
            if is_palindrome(a_sum):
                break
        else:
            # while finished without break: no palindrome within 50 steps.
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
    # Print the count of Lychrel candidates below the default limit.
    # NOTE(review): relies on `solution` being defined above — the obfuscated
    # source names every function `A`, so this call fails as-is; TODO confirm.
    print(f'''{solution() = }''')
| 293 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# All paths are relative to the repo root; run this script from there.
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
# Restored names: both constants were bound to the same obfuscated name, so the
# first was clobbered and INTERNAL_OPS (read by the checker below) never existed.
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """Check that every op used by a TF SavedModel is convertible to ONNX.

    Args:
        saved_model_path: path to the SavedModel ``.pb`` file.
        strict: raise instead of printing when incompatible ops are found.
        opset: highest ONNX opset to accept ops from.

    Raises:
        Exception: in strict mode, listing the incompatible op names.

    Restored from obfuscated source (triplicate parameter names, collapsed
    locals). Also fixes a real bug: the original ``raise`` concatenated a str
    with a list (TypeError); the op names are now joined with newlines.
    NOTE(review): the top-of-file import reads ``saved_model_pba`` — upstream
    is ``saved_model_pb2``; TODO fix that import line.
    """
    saved_model = SavedModel()
    onnx_ops = []

    # Collect every op supported up to (and including) the requested opset.
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        # BUG FIX: str + list raised TypeError in the original; join the names.
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the ONNX compatibility check.
    # Restored from obfuscated source: the parse result was bound to a mangled
    # name while the lines below read `args` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 293 | 1 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the number starts with a recognized issuer prefix.

    Accepted prefixes: "34", "35", "37", "4", "5", "6". Restored name: the
    obfuscated source named all three helpers identically, breaking the call
    from validate_credit_card_number (and the __main__ demo).
    """
    return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6"""))
def luhn_validation(credit_card_number: str) -> bool:
    """Return True if the number passes the Luhn checksum.

    Every second digit from the right is doubled; doubled values above 9 are
    reduced to their digit sum; the grand total must be divisible by 10.

    Restored from obfuscated source in which every local shared one name, so
    ``range`` received the input string and ``cc_number`` was undefined.
    """
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 1_0
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit

    # Sum up the remaining (undoubled) digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])

    return total % 1_0 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate a credit card number and print a human-readable verdict.

    Checks, in order: digits only, length 13-16, issuer prefix, Luhn checksum.
    Returns True only when all checks pass.

    Restored name and parameter: the obfuscated source named all three helpers
    identically, and the error-message f-string below already referenced
    ``credit_card_number``.
    """
    error_message = F'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(F'{error_message} it has nonnumerical characters.')
        return False

    if not 1_3 <= len(credit_card_number) <= 1_6:
        print(F'{error_message} of its length.')
        return False

    if not validate_initial_digits(credit_card_number):
        print(F'{error_message} of its first two digits.')
        return False

    if not luhn_validation(credit_card_number):
        print(F'{error_message} it fails the Luhn check.')
        return False

    print(F'{credit_card_number} is a valid credit card number.')
    return True
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Smoke demo: a well-known valid test number and an invalid short number
    # (both print their verdict via validate_credit_card_number).
    validate_credit_card_number('''4111111111111111''')
    validate_credit_card_number('''32323''')
| 176 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
A : Dict = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
A : str = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
A : Union[str, Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A : int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A : Optional[int] = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
A : str = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A : Optional[Any] = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
A : List[Any] = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A : Union[str, Any] = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
A : int = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A : Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
A : List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A : str = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
A : Optional[int] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
A : Optional[int] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
A : Optional[int] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
A : List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
A : Optional[int] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
A : Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
A : List[Any] = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A : Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
A : int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
A : Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
A : str = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A : int = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
A : str = ''''''
A : Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
A : int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A : List[Any] = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    """A structurally valid README string parses into the expected dict."""
    # Bug fix: the original declared both parameters as `__a` (duplicate argument
    # names are a SyntaxError) and passed the same name twice to `from_string`.
    # NOTE(review): the second argument is presumably the module-level YAML
    # structure spec -- confirm `example_yaml_structure` is its name in this file.
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    """Invalid README strings raise the documented validation error."""
    # Bug fix: the original's duplicate `__a` parameters were a SyntaxError and
    # the expected exception type was mangled away.
    # NOTE(review): `ValueError` matches the upstream test suite -- confirm.
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    """Structurally ambiguous READMEs fail already during parsing."""
    # Bug fix: duplicate `__a` parameters (SyntaxError) restored to the
    # parametrize argnames; exception type presumed ValueError as upstream.
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    """With suppress_parsing_errors=True the same README must not raise."""
    # Bug fix: the parameter must be named `readme_md` to match the parametrize
    # argnames (pytest matches by name), and the flag should be a plain True --
    # the original passed the README text itself as the flag value.
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    """A valid README written to disk round-trips through `from_readme`."""
    # Bug fix: the original assigned the README path to a throwaway `A__` local
    # but then asserted against an undefined `path` (NameError), and its two
    # parameters were both named `__a` (SyntaxError).
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    """Invalid READMEs on disk raise the documented validation error."""
    # Bug fix: duplicate `__a` parameters (SyntaxError) and the error message
    # was formatted with an undefined `path`-like name.
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    """Structurally ambiguous READMEs on disk fail during parsing."""
    # Bug fix: duplicate `__a` parameters restored to parametrize argnames.
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    """With suppress_parsing_errors=True the on-disk README must not raise."""
    # Bug fix: parameter renamed to match the parametrize argnames; the
    # suppress flag was previously the README text itself.
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 176 | 1 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase__(unittest.TestCase):
    """Integration test for the Flax XLM-RoBERTa base checkpoint."""

    @slow
    def test_flax_xlm_roberta_base(self):
        # Bug fix: the original bound every value to the same shadowed local and
        # then referenced an undefined `a_`; the method name also did not start
        # with `test`, so unittest never collected it.
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 241 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__(unittest.TestCase):
    """Integration test for the TF Camembert base checkpoint."""

    @slow
    def test_output_embeds_base_model(self):
        # Bug fix: the original referenced an undefined `a_`, shadowed every
        # local under one name, and used the nonexistent `tf.intaa`/`tf.floataa`
        # dtypes (mangled `tf.int32`/`tf.float32`).
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 241 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A(unittest.TestCase):
    """Fixture holder that produces kwargs for an ImageGPT image processor."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        # Bug fix: the original bound every argument to a throwaway `_a` local
        # (and had duplicate parameter names), so no `self.*` attribute was ever
        # set and `prepare_image_processor_dict` crashed with AttributeError.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        """Return constructor kwargs for the image processor under test."""
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class A(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for ImageGPTImageProcessor (de)serialization and properties."""

    # Bug fix: this attribute is read as `self.image_processing_class` below,
    # but was assigned under a mangled name; the mixin base class name was also
    # mangled to an undefined `_a` (restored from the file's import).
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        # unittest calls `setUp` by name; the original method name was mangled
        # and the tester was bound to a throwaway local instead of `self`.
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                # numpy arrays need elementwise comparison
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
            image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
            image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    """Load two fixture images used by the ImageGPT integration test.

    Bug fix: renamed from a mangled identifier -- the integration test below
    calls `prepare_images()` -- and restored the local names (`dataset`,
    `image1`, `image2`) that were collapsed into a single throwaway variable.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class A(unittest.TestCase):
    """Slow integration test: ImageGPT image processor on real fixture images."""

    @slow
    def test_image(self):
        # Bug fix: the original referenced undefined names (`images`,
        # `image_processing`, `expected_slice`) because every assignment was
        # made to the same throwaway local.
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 22 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def snake_case_(UpperCamelCase: str) -> str:
    """Normalize an answer string: lowercase, drop punctuation and articles,
    and collapse whitespace (SQuAD-style normalization)."""

    def remove_articles(text):
        # Bug fix: the compiled pattern was discarded and the text itself was
        # passed as the regex to `re.sub`.
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        # Bug fix: the punctuation set was bound to a throwaway local while the
        # comprehension read an undefined `exclude`.
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase))))
def snake_case_(a_gold, a_pred):
    """Exact-match score: 1 if the two normalized strings are equal, else 0.

    Bug fix: both parameters were named `UpperCamelCase` (a SyntaxError).
    NOTE(review): relies on a module-level `normalize_answer`; that helper's
    name appears mangled elsewhere in this file -- confirm it is restored too.
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def snake_case_(predictions, references):
    """Corpus-level exact match (%): a prediction scores if it matches any of
    its references.

    Bug fix: duplicate parameter names (SyntaxError), and the final average
    summed the parameter list instead of the per-example scores.
    """
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def snake_case_(sgrams, cgrams, rgramslist, numref):
    """Compute the SARI keep/delete/add scores for a single n-gram order.

    Bug fix: all four parameters were named `UpperCamelCase` (a SyntaxError) and
    every local was collapsed into `_a`; names restored from the standard SARI
    implementation and from how each value is used below.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (predicted) sentence.
        rgramslist: list of n-gram lists, one per reference.
        numref: number of references.
    Returns:
        (keepscore, delscore_precision, addscore) for this n-gram order.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)
    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    # Replicate source counts once per reference so intersections with the
    # pooled reference counter are on the same scale.
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def snake_case_(ssent, csent, rsents):
    """Sentence-level SARI: average of keep/delete/add scores over 1- to 4-grams.

    Bug fix: the three parameters were all named `UpperCamelCase` (SyntaxError)
    and the per-order n-gram lists were collapsed into single mangled names;
    restored from the standard SARI implementation.
    """
    numref = len(rsents)
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)
    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)
    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)
    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def snake_case_(sentence, lowercase=True, tokenizer="13a", return_str=True):
    """Tokenize/normalize a sentence with a sacrebleu or moses tokenizer.

    Bug fix: the parameters all shared one mangled name (SyntaxError) -- their
    real names (`lowercase`, `tokenizer`, `return_str`) are visible in the
    body -- and the lowercased sentence was discarded instead of being fed to
    the tokenizer.
    """
    # Normalization is requried for the ASSET dataset to allow using sacrebleu
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            # sacrebleu >= 2.0 moved the tokenizer registry
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def snake_case_(sources, predictions, references):
    """Corpus-level SARI score (0-100).

    Bug fix: duplicate parameter names (SyntaxError) and the accumulator was
    assigned to a throwaway local while `sari_score` itself was undefined.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def snake_case_(predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False):
    """Corpus BLEU via sacrebleu, transposing references to its expected layout.

    Bug fix: locals were collapsed into `_a`, leaving `references_per_prediction`
    and `output` undefined; parameter names restored from the keyword arguments
    forwarded to `sacrebleu.corpus_bleu`.
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # sacrebleu expects one list per reference *position*, not per prediction
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A(datasets.Metric):
    """WikiSplit metric: combines SARI, sacreBLEU and exact match."""

    def _info(self):
        # Bug fix: `datasets.Metric` dispatches to `_info`/`_compute` by name;
        # the original methods had mangled names and were never called.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        # Bug fix: the result dict was bound to a throwaway local while
        # `result.update(...)` referenced an undefined name.
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 22 | 1 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True iff `pattern` occurs in `text`.

    Bug fix: both functions were renamed to the same mangled identifier with
    duplicate parameter names (a SyntaxError), and the locals `failure`, `i`,
    `j` were never bound; names restored to match the callers in the
    `__main__` self-tests below.
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Return the KMP failure (longest proper prefix-suffix) array of `pattern`."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # fall back to the previous candidate border without advancing j
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Self-tests; bug fix: the original bound every fixture to the same mangled
    # name `A` while the asserts read `pattern`, `text1`, `text2`, `text`.
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Bug fix: the lazy-module machinery requires a dict named `_import_structure`
# that later branches extend by key and `_LazyModule` consumes; the original
# assigned each piece to fresh mangled names and dropped the
# `sys.modules[__name__]` replacement.
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Bug fix: the lazy-module machinery requires a dict named `_import_structure`
# that the torch/tf branches extend by key and `_LazyModule` consumes; the
# original assigned each piece to fresh mangled names and dropped the
# `sys.modules[__name__]` replacement.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 563 |
from math import factorial
# Pre-computed factorial of each decimal digit, keyed by its character.
# Bug fix: the function body reads `DIGIT_FACTORIAL`, but the constant was
# assigned to a mangled name; likewise both functions below were renamed to
# the same `_A` (the second shadowed the first) while the main guard called
# `solution()` and the second function called `sum_of_digit_factorial()`.
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the decimal digits of `n`."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Project Euler 34: sum of all numbers equal to the sum of the factorials
    of their digits (1 and 2 are excluded as non-sums)."""
    # 7 * 9! + 1 is a safe upper bound: an 8-digit number cannot reach 8 * 9!.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 563 | 1 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class __snake_case ( __A ):
__lowerCAmelCase : Dict = (DPMSolverSDEScheduler,)
__lowerCAmelCase : Optional[int] = 10
def lowerCAmelCase__ ( self , **_A):
SCREAMING_SNAKE_CASE_ = {
'num_train_timesteps': 1100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**UpperCamelCase__)
return config
def lowerCAmelCase__ ( self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__)
def lowerCAmelCase__ ( self):
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__)
def lowerCAmelCase__ ( self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCamelCase__)
def lowerCAmelCase__ ( self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__)
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**UpperCamelCase__)
scheduler.set_timesteps(self.num_inference_steps)
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ = sample.to(UpperCamelCase__)
for i, t in enumerate(scheduler.timesteps):
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(UpperCamelCase__))
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(UpperCamelCase__))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6) < 1E-3
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(prediction_type='v_prediction')
SCREAMING_SNAKE_CASE_ = scheduler_class(**UpperCamelCase__)
scheduler.set_timesteps(self.num_inference_steps)
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ = sample.to(UpperCamelCase__)
for i, t in enumerate(scheduler.timesteps):
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(UpperCamelCase__))
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(UpperCamelCase__))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1) < 1E-3
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**UpperCamelCase__)
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter.to(UpperCamelCase__) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(UpperCamelCase__))
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(UpperCamelCase__))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6) < 1E-3
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**UpperCamelCase__ , use_karras_sigmas=UpperCamelCase__)
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter.to(UpperCamelCase__) * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ = sample.to(UpperCamelCase__)
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(UpperCamelCase__))
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(UpperCamelCase__))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1) < 1E-2
| 709 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Reference passage used by every test below (annotation corrected: it is a str).
UpperCamelCase__ : str = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class __snake_case(unittest.TestCase, ToolTesterMixin):
    """Tests for the ``text-question-answering`` tool, local and remote.

    Fixes: the mixin base was an undefined name (the import is
    ``ToolTesterMixin``); the first method never assigned the ``self.tool`` /
    ``self.remote_tool`` attributes the other methods read and was not named
    ``setUp``; ``remote=_A`` referenced an undefined name (should be True);
    and the test methods lacked the ``test_`` prefix so unittest skipped them.
    """

    def setUp(self):
        # unittest calls this before each test; it must create the attributes
        # the test methods read.
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(UpperCamelCase__, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(UpperCamelCase__, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=UpperCamelCase__, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=UpperCamelCase__, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
| 620 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# Fix: all seven module constants were assigned to one name, clobbering each
# other, while the docstring decorators below reference these exact names
# (_CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC, ... — see the @add_code_sample_docstrings calls).

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation, the basic ResNet building block.

    Fixes: class renamed to the name its call sites already use
    (``ResNetConvLayer``); ``nn.Convad``/``nn.BatchNormad`` do not exist in
    torch.nn (should be ``Conv2d``/``BatchNorm2d``); constructor arguments
    referenced an undefined mangled name instead of the parameters.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        # "same"-style padding for odd kernel sizes; no conv bias because the
        # following batch norm has its own affine offset.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet stem: 7x7 stride-2 convolution followed by 3x3 stride-2 max pooling.

    Fixes: class renamed to the name its call sites already use;
    ``nn.MaxPoolad`` -> ``nn.MaxPool2d``; forward referenced an undefined
    mangled name instead of ``pixel_values``.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """1x1 convolution + batch norm projecting the residual to the block's
    output shape/stride.

    Fixes: class renamed to the name its call sites already use;
    ``nn.Convad``/``nn.BatchNormad`` -> ``nn.Conv2d``/``nn.BatchNorm2d``;
    constructor arguments referenced an undefined mangled name.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """Classic residual block: two 3x3 convolutions plus an optional projection
    shortcut, applied when the shape or stride changes.

    Fixes: class renamed to the name ``ResNetStage`` dispatches on; restored
    the parameter references the mangled names had replaced (second conv gets
    ``activation=None`` so the nonlinearity is applied once, after the add).
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand, with an
    optional projection shortcut; channels are reduced by ``reduction`` inside.

    Fixes: class renamed to the name ``ResNetStage`` dispatches on; restored
    the parameter references the mangled names had replaced (last conv gets
    ``activation=None`` so the nonlinearity is applied once, after the add).
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """A stack of ``depth`` identical residual layers; only the first layer
    downsamples (with ``stride``).

    Fixes: class renamed to the name its call sites already use; restored the
    parameter references the mangled names had replaced.
    """

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    """All ResNet stages; optionally collects the hidden state before/after
    every stage.

    Fixes: class renamed to the name its call sites already use; restored the
    locals (``hidden_states`` tuple, ``in_out_channels``) that the mangled
    single-name assignments had clobbered.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """Base class handling weight init and checkpoint loading for ResNet models.

    Fixes: base class and class-attribute names were mangled to undefined /
    duplicate placeholders; restored the PreTrainedModel contract attributes
    and the ``nn.Conv2d``/``nn.BatchNorm2d`` spellings (``Convad`` /
    ``BatchNormad`` do not exist in torch.nn). The gradient-checkpointing
    setter's assignment had lost its ``module.`` target.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He initialization for convolutions; unit-scale/zero-offset for norms.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
# Fix: both docstring constants were assigned to the same mangled name; the
# decorators below expect RESNET_START_DOCSTRING / RESNET_INPUTS_DOCSTRING.
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """The bare ResNet model outputting raw features without any specific head on top.""",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    """Stem + encoder + global average pool, no task head.

    Fixes: class/base names and decorator arguments were mangled to undefined
    placeholders; ``nn.AdaptiveAvgPoolad`` -> ``nn.AdaptiveAvgPool2d``;
    restored the distinct locals the single-name assignments had clobbered.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    """ResNet backbone plus a linear classification head over the pooled features.

    Fixes: class/base names and decorator arguments were mangled to undefined
    placeholders, and every assignment that should set
    ``self.config.problem_type`` / ``loss`` / ``output`` had lost its target;
    restored them.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from the label dtype/count, then cache it.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    """Exposes per-stage ResNet feature maps for detection/segmentation heads.

    Fixes: class/base names and decorator arguments were mangled to undefined
    placeholders; the encoder call's keyword values and the ``output`` tuple
    locals were clobbered; restored them (the encoder is always asked for all
    hidden states so the requested stages can be selected afterwards).
    """

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 295 |
def snake_case(lowerCamelCase):
    """Stable counting sort.

    Returns a new list with the integer elements of ``lowerCamelCase`` in
    ascending order; the input collection is not modified. O(n + k) time
    where k is the value range.

    Fix: the placement line in the final loop had lost its subscripted
    left-hand side (it rebound the output list to a scalar), so the function
    could never return the sorted result.
    """
    if lowerCamelCase == []:
        return []

    # get some information about the collection
    coll_len = len(lowerCamelCase)
    coll_max = max(lowerCamelCase)
    coll_min = min(lowerCamelCase)

    # create the counting array: one slot per value in [coll_min, coll_max]
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in lowerCamelCase:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection; place the elements from end to begin so
    # equal keys keep their original relative order (stable sort)
    ordered = [0] * coll_len
    for i in reversed(range(0, coll_len)):
        counting_arr[lowerCamelCase[i] - coll_min] -= 1
        ordered[counting_arr[lowerCamelCase[i] - coll_min]] = lowerCamelCase[i]

    return ordered
def snake_case(lowerCamelCase):
    """Return the characters of the string ``lowerCamelCase`` sorted ascending,
    by counting-sorting their code points.

    Fix: both comprehensions applied ``chr``/``ord`` to the whole argument
    instead of the loop variables (``i`` / ``c``), which raised TypeError.

    NOTE(review): ``counting_sort`` is not defined under that name in this
    module (the sorter above is also named ``snake_case``) — confirm the
    intended public names before running.
    """
    return "".join([chr(i) for i in counting_sort([ord(c) for c in lowerCamelCase])])
if __name__ == "__main__":
    # Test string sort
    # NOTE(review): `counting_sort_string` / `counting_sort` are not defined in
    # this module under those names (the defs above are both named `snake_case`),
    # and the results of `input()` are bound to `__UpperCamelCase` while the
    # later lines read `user_input` / `unsorted` — running this guard as-is
    # raises NameError. Confirm the intended names before relying on it.
    assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"

    __UpperCamelCase : str = input("""Enter numbers separated by a comma:\n""").strip()
    __UpperCamelCase : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
    print(counting_sort(unsorted))
| 80 | 0 |
from ...utils import is_torch_available, is_transformers_available


# Guarded import: the VQ-Diffusion pipeline requires both torch and
# transformers, so its symbols are only re-exported when both are installed.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__:
    """Mixin exercising serialization round-trips for feature extractors.

    Concrete test classes must provide ``feature_extraction_class`` and
    ``feat_extract_dict`` attributes.

    Fixes: all four methods shared one mangled name (they clobbered each other
    and lacked the ``test_`` prefix unittest needs); the JSON-string check
    compared against an unrelated global instead of the expected ``value``;
    the tempdir path/handle arguments were mangled; locals were restored.
    """

    # Placeholder overridden by concrete test classes.
    # NOTE(review): the original attribute name was lost in a rename — confirm
    # what consumers expect before relying on it.
    _lowerCAmelCase = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, '''feat_extract.json''')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def _SCREAMING_SNAKE_CASE(word):
    """Return 1 if every character of ``word`` is a CJK ideograph, else 0.

    Fix: the body took ``ord()`` of the whole word and passed the word (not
    its code point) to the per-character check; restored the per-character
    variables.

    NOTE(review): ``_is_chinese_char`` is not defined under that name in this
    module (the defs here all share one mangled name) — confirm the intended
    public names.
    """
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def _SCREAMING_SNAKE_CASE(tokens):
    """Collect the multi-character, fully-Chinese tokens from ``tokens`` as a list.

    Fix: the length/Chinese checks and the set insertion were applied to the
    whole token list argument instead of each ``token``.
    """
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def _SCREAMING_SNAKE_CASE(bert_tokens, chinese_word_set):
    """Mark WordPiece tokens that continue a known Chinese word with a '##' prefix.

    Mutates and returns ``bert_tokens`` (aliased as ``bert_word``), greedily
    matching the longest known word starting at each Chinese character.

    Fix: both parameters shared one name (a SyntaxError — duplicate argument),
    and the length/min computations referenced that mangled name instead of
    the token list / max word length.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_tokens)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # try the longest candidate first so the greedy match is maximal
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def _SCREAMING_SNAKE_CASE(lines, ltp_tokenizer, bert_tokenizer):
    """For each input line, compute the positions of WordPiece sub-tokens that
    sit inside a whole Chinese word (the whole-word-masking reference ids).

    Fix: the three parameters shared one name (a SyntaxError — duplicate
    argument) while the body already read ``lines`` / ``ltp_tokenizer`` /
    ``bert_tokenizer``; per-item arguments inside the loops had also been
    replaced by that mangled name and are restored.
    """
    ltp_res = []
    # batch the LTP segmenter in chunks of 100 lines
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def _SCREAMING_SNAKE_CASE(args):
    """Read the input corpus, compute whole-word-masking reference ids, and
    write them to ``args.save_path`` as one JSON list per line.

    Fix: the parameter had been renamed while the body read ``args``, and the
    blank-line filter measured ``len(args)`` instead of ``len(line)``.
    """
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # CLI: input corpus, LTP resources, BERT tokenizer resources, output path.
    # NOTE(review): the guard calls `main(args)` but the functions above are
    # all defined under the mangled name `_SCREAMING_SNAKE_CASE`, so `main`
    # does not resolve here — confirm the intended public names.
    __UpperCamelCase : Any = argparse.ArgumentParser(description='''prepare_chinese_ref''')
    parser.add_argument(
        '''--file_name''',
        type=str,
        default='''./resources/chinese-demo.txt''',
        help='''file need process, same as training data in lm''',
    )
    parser.add_argument(
        '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
    )
    parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
    parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    __UpperCamelCase : Optional[int] = parser.parse_args()
    main(args)
| 4 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
# ---------------------------------------------------------------------------
# pytest suite for the PokerHand class (Project Euler problem 54 helper).
#
# NOTE(review): in the incoming revision every constant below was assigned to
# one and the same name and every test function was called `A__`, so each
# definition shadowed the previous one, several defs repeated a parameter name
# (a SyntaxError), and the `parametrize` decorators referenced an undefined
# `_a`.  Distinct names are restored so the module imports and pytest can
# collect every test.
# ---------------------------------------------------------------------------

# 41 hands ordered from weakest to strongest; comments label the hand type.
SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

# (hand, other, expected result of hand.compare_with(other))
TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

# (hand, expected result of _is_flush)
TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

# (hand, expected result of _is_straight)
TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

# (hand, expected result of _is_five_high_straight, expected _card_values)
TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

# (hand, expected result of _is_same_kind)
TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

# (hand, expected internal _hand_type value)
TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)


def generate_random_hand():
    """Return a random (hand, other, expected) triple drawn from SORTED_HANDS."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # (play >= oppo) + (play > oppo) yields 0/1/2 which indexes Loss/Tie/Win.
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Yield `number_of_hands` random comparison triples."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    # Sorting a shuffled copy must reproduce the canonical weak-to-strong order.
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # A low-ace straight must sort below the six-high straight.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated calls must not mutate the cached card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Project Euler #54: player one wins exactly 376 of the 1000 dealt hands.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 385 | 0 |
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the candidate plaintext for every key.

    Only uppercase ASCII letters are shifted; every other character is copied
    through unchanged.  Returns nothing; each of the 26 candidates is printed.

    NOTE(review): in the incoming revision both functions were named
    `lowerCamelCase_` (the second shadowed the first) while the bodies still
    referred to `decrypt`, `main` and `message`, so the script crashed with
    NameError; the intended names are restored.  The old `-> str` annotation
    was also wrong — the function prints and returns None.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    # Wrap around the alphabet.
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}')


def main() -> None:
    """Prompt for an encrypted message and print all candidate decryptions."""
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 709 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds a tiny ConvBERT config plus dummy inputs and checks each TF head.

    NOTE(review): the incoming revision named this class `lowerCAmelCase__`
    while `setUp` in the test class instantiated `TFConvBertModelTester`,
    every method shared one obfuscated name (so only the last survived),
    several defs repeated a parameter name (a SyntaxError) and `__init__`
    dropped all values into a throw-away local.  The conventional tester
    shape is restored.  The keyword parameters of `__init__` are accepted
    for signature compatibility but deliberately overridden by the
    hard-coded values below, as in the obfuscated body.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # Fixed tiny configuration (intentionally ignores the kwargs above).
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels and a matching ConvBertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base model: accepts dict, list and bare-ids inputs; checks output shape."""
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """MLM head: logits over the vocabulary for every position."""
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Sequence classification head: one logit vector per example."""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Multiple choice head: inputs are tiled across the choice dimension."""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token classification head: one logit vector per token."""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """QA head: start/end logits, one per token."""
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape used by the common tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model tests plus pipeline mapping for ConvBERT.

    NOTE(review): restored from a mangled revision in which the base classes
    were undefined names, the class and all of its methods shared obfuscated
    names (so only the last method survived collection) and locals collapsed
    into a single `A`, losing the config object entirely.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Save each model as a TF SavedModel and verify hidden states / attentions survive reload."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                # ConvBERT splits heads between self-attention and the conv
                # branch, hence num_attention_heads / 2 in the attention shape.
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Attentions can be requested per-call or via the config, and are always last in the outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
A = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(__UpperCamelCase )[0]
A = [1, 6, 768]
self.assertEqual(output.shape , __UpperCamelCase )
A = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) | 224 | 0 |
def _lowercase( __a : int ):
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
a__ =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
a__ =1
if upper_limit > 0:
a__ =1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            # Bug fix: the parsed number was stored in a throw-away name while
            # the branches below read `N`, and the sequence function was
            # invoked through an undefined name.
            N = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(_lowercase(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')

    import doctest

    doctest.testmod()
| 20 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast tests for the Kandinsky v2.2 prior pipeline with tiny dummy models.

    NOTE(review): restored from a mangled revision in which the mixin base was
    an undefined name, all class attributes shared one name and every method
    was called `__A` (so only the last survived); the property names used
    below are the ones the obfuscated bodies themselves referenced via
    `self.dummy_*` / `self.text_embedder_hidden_size` / `self.time_input_dim`.
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073], image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711], resample=3, size=224, )
        return image_processor

    def get_dummy_components(self):
        """Assemble every dummy sub-model into the pipeline's component dict."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1_000, clip_sample=True, clip_sample_range=10.0, )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs; MPS needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        """Run the prior on CPU and compare a slice of the image embedding."""
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 491 | 0 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __snake_case ( ProcessorMixin ):
    """Donut-style processor wrapping an image processor and a tokenizer.

    NOTE(review): the base class was an undefined name and the three class
    attributes all shared one name, breaking the ProcessorMixin contract
    (`attributes`, `image_processor_class`, `tokenizer_class`); `__init__`
    also repeated its parameter name, a SyntaxError.  Restored here.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated spelling of `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)
        # `current_processor` is what __call__ forwards to inside the
        # (deprecated) as_target_processor context.
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = kwargs.pop("""images""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = kwargs.pop("""text""" , __SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
snake_case__ : List[Any] = args[0]
snake_case__ : List[str] = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
snake_case__ : List[str] = self.image_processor(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is not None:
snake_case__ : List[Any] = self.tokenizer(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ : Optional[int] = encodings["""input_ids"""]
return inputs
def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@contextmanager
def __UpperCamelCase ( self ):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
snake_case__ : str = True
snake_case__ : List[Any] = self.tokenizer
yield
snake_case__ : Tuple = self.image_processor
snake_case__ : List[Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None ):
if added_vocab is None:
snake_case__ : Optional[Any] = self.tokenizer.get_added_vocab()
snake_case__ : Any = {}
while tokens:
snake_case__ : Optional[int] = re.search(R"""<s_(.*?)>""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
snake_case__ : Optional[int] = start_token.group(1 )
snake_case__ : Optional[Any] = re.search(Rf"</s_{key}>" , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
snake_case__ : Dict = start_token.group()
if end_token is None:
snake_case__ : Dict = tokens.replace(__SCREAMING_SNAKE_CASE , """""" )
else:
snake_case__ : int = end_token.group()
snake_case__ : List[str] = re.escape(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = re.escape(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , __SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
snake_case__ : List[str] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case__ : str = self.tokenajson(__SCREAMING_SNAKE_CASE , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE )
if value:
if len(__SCREAMING_SNAKE_CASE ) == 1:
snake_case__ : Union[str, Any] = value[0]
snake_case__ : List[str] = value
else: # leaf nodes
snake_case__ : Optional[int] = []
for leaf in content.split(R"""<sep/>""" ):
snake_case__ : Any = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case__ : str = leaf[1:-2] # for categorical special tokens
output[key].append(__SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
snake_case__ : Union[str, Any] = output[key][0]
snake_case__ : Dict = tokens[tokens.find(__SCREAMING_SNAKE_CASE ) + len(__SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __UpperCamelCase ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __UpperCamelCase ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __SCREAMING_SNAKE_CASE , )
return self.image_processor
| 419 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> int:
    """Convert an original mLUKE checkpoint into a Hugging Face ``LukeForMaskedLM`` checkpoint.

    Positional call order (see the ``__main__`` block below): checkpoint_path, metadata_path,
    entity_vocab_path, pytorch_dump_folder_path, model_size.

    NOTE(review): this function has been mechanically renamed — every parameter is called
    ``__magic_name__`` (duplicate argument names are a SyntaxError) and most assignment
    targets were rewritten to ``snake_case__`` while later lines still read the intended
    names (``metadata``, ``config``, ``state_dict``, ``tokenizer``, ...). The body is
    preserved verbatim below; restore the original names before running. Also note the
    ``__main__`` block calls this as ``convert_luke_checkpoint``.
    """
    with open(__magic_name__ ) as metadata_file:
        # NOTE(review): json.load() is handed the path argument, not `metadata_file` — confirm.
        snake_case__ : Optional[Any] = json.load(__magic_name__ )
    snake_case__ : Tuple = LukeConfig(use_entity_aware_attention=__magic_name__ , **metadata["""model_config"""] )
    # Load in the weights from the checkpoint_path
    snake_case__ : Tuple = torch.load(__magic_name__ , map_location="""cpu""" )["""module"""]
    # Load the entity vocab file
    snake_case__ : Any = load_original_entity_vocab(__magic_name__ )
    # add an entry for [MASK2]
    snake_case__ : List[Any] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    snake_case__ : List[str] = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
    # Add special tokens to the token vocabulary for downstream tasks
    snake_case__ : Optional[Any] = AddedToken("""<ent>""" , lstrip=__magic_name__ , rstrip=__magic_name__ )
    snake_case__ : Any = AddedToken("""<ent2>""" , lstrip=__magic_name__ , rstrip=__magic_name__ )
    # NOTE(review): the same token variable is registered twice below; the originals were
    # the two distinct AddedToken objects created above — confirm.
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
    tokenizer.save_pretrained(__magic_name__ )
    with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """r""" ) as f:
        snake_case__ : Union[str, Any] = json.load(__magic_name__ )
    snake_case__ : Optional[Any] = """MLukeTokenizer"""
    with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" ) as f:
        json.dump(__magic_name__ , __magic_name__ )
    with open(os.path.join(__magic_name__ , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
        json.dump(__magic_name__ , __magic_name__ )
    snake_case__ : List[Any] = MLukeTokenizer.from_pretrained(__magic_name__ )
    # Initialize the embeddings of the special tokens
    snake_case__ : List[str] = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
    snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
    snake_case__ : Optional[Any] = state_dict["""embeddings.word_embeddings.weight"""]
    snake_case__ : List[str] = word_emb[ent_init_index].unsqueeze(0 )
    snake_case__ : List[str] = word_emb[enta_init_index].unsqueeze(0 )
    # NOTE(review): the concatenated embedding below should be written back into
    # state_dict["embeddings.word_embeddings.weight"]; here it is bound to a dead local.
    snake_case__ : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        snake_case__ : List[str] = state_dict[bias_name]
        snake_case__ : List[str] = decoder_bias[ent_init_index].unsqueeze(0 )
        snake_case__ : Dict = decoder_bias[enta_init_index].unsqueeze(0 )
        snake_case__ : str = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            # NOTE(review): the three copies below were presumably stored under the
            # w2e_/e2w_/e2e_ entity-aware query keys of state_dict — confirm upstream.
            snake_case__ : Union[str, Any] = f"encoder.layer.{layer_index}.attention.self."
            snake_case__ : Tuple = state_dict[prefix + matrix_name]
            snake_case__ : str = state_dict[prefix + matrix_name]
            snake_case__ : Dict = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    snake_case__ : Union[str, Any] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    snake_case__ : Union[str, Any] = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
    snake_case__ : List[Any] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    snake_case__ : Optional[Any] = state_dict["""entity_predictions.bias"""]
    snake_case__ : Optional[Any] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
    snake_case__ : Any = torch.cat([entity_prediction_bias, entity_mask_bias] )
    snake_case__ : int = LukeForMaskedLM(config=__magic_name__ ).eval()
    state_dict.pop("""entity_predictions.decoder.weight""" )
    state_dict.pop("""lm_head.decoder.weight""" )
    state_dict.pop("""lm_head.decoder.bias""" )
    snake_case__ : Tuple = OrderedDict()
    for key, value in state_dict.items():
        # NOTE(review): both branches bind a dead local; the original prefixed non-head keys
        # with "luke." when building the new state dict — confirm.
        if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
            snake_case__ : Optional[Any] = state_dict[key]
        else:
            snake_case__ : Optional[int] = state_dict[key]
    # NOTE(review): annotated tuple assignment is invalid syntax; the originals were
    # `missing_keys, unexpected_keys`, which the checks below read.
    snake_case__ , snake_case__ : Any = model.load_state_dict(__magic_name__ , strict=__magic_name__ )
    if set(__magic_name__ ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
    if set(__magic_name__ ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    snake_case__ : List[Any] = MLukeTokenizer.from_pretrained(__magic_name__ , task="""entity_classification""" )
    snake_case__ : int = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
    snake_case__ : Union[str, Any] = (0, 9)
    snake_case__ : str = tokenizer(__magic_name__ , entity_spans=[span] , return_tensors="""pt""" )
    snake_case__ : List[Any] = model(**__magic_name__ )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else: # base
        snake_case__ : List[Any] = torch.Size((1, 33, 7_68) )
        snake_case__ : Optional[int] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else: # base
        snake_case__ : Tuple = torch.Size((1, 1, 7_68) )
        snake_case__ : int = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    snake_case__ : Optional[Any] = MLukeTokenizer.from_pretrained(__magic_name__ )
    snake_case__ : Any = """Tokyo is the capital of <mask>."""
    snake_case__ : str = (24, 30)
    snake_case__ : List[str] = tokenizer(__magic_name__ , entity_spans=[span] , return_tensors="""pt""" )
    snake_case__ : Optional[int] = model(**__magic_name__ )
    snake_case__ : List[Any] = encoding["""input_ids"""][0].tolist()
    snake_case__ : Tuple = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
    snake_case__ : int = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(__magic_name__ )
    snake_case__ : str = outputs.entity_logits[0][0].argmax().item()
    snake_case__ : Tuple = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("""Saving PyTorch model to {}""".format(__magic_name__ ) )
    model.save_pretrained(__magic_name__ )
def load_original_entity_vocab(entity_vocab_path):
    """Load the original JSON-lines entity vocabulary into a ``{name: id}`` mapping.

    Special tokens keep their bare name; ordinary entities are keyed as
    ``"{language}:{entity_name}"``. Fixes: the definition name did not match the
    ``load_original_entity_vocab`` call site above, and assignment targets were rebound to
    throwaway locals while later lines read the intended names.
    """
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                # Special tokens are language-independent: store once, skip the rest.
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
A_ : Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 419 | 1 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase_ : Union[str, Any] = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase_ : List[str] = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase_ : int = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace (SQuAD-style).

    Fixes: definition name restored to ``normalize_answer`` (read by ``compute_exact``), and
    ``remove_articles`` previously compiled the regex into a dead local while passing the
    *text* to ``re.sub`` as the pattern.
    """

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        # Collapse all runs of whitespace to single spaces and strip the ends.
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized prediction equals the normalized gold answer, else 0.

    Fixes: both parameters shared the name ``A`` (a SyntaxError), and both sides of the
    comparison normalized the same argument.
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """Percentage (0-100) of predictions that exactly match at least one of their references.

    Fixes: both parameters shared the name ``A`` (a SyntaxError) and ``compute_exact`` was
    called with the same argument twice instead of ``(ref, pred)``.
    """
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Keep/deletion/addition scores for one n-gram order of SARI.

    Args:
        sgrams: source-sentence n-grams.
        cgrams: candidate (prediction) n-grams.
        rgramslist: list of per-reference n-gram lists.
        numref: number of references.

    Returns:
        ``(keepscore, delscore_precision, addscore)``.

    Fixes: the four parameters shared one name (a SyntaxError) and every intermediate was
    rebound to a throwaway local; names restored from the intact reads in the body.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        # Scale source counts so they are comparable with the pooled reference counts.
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    """Sentence-level SARI for a source sentence, a candidate and a list of references.

    Builds 1- to 4-gram lists for source/candidate/references, scores each order with
    ``SARIngram`` and averages keep/delete/add over the four orders.

    Fixes: the three parameters shared one name (a SyntaxError) and the annotated tuple
    assignments collapsed the four per-order scores into a single variable; names restored
    from the intact reads in the body.
    """
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def UpperCAmelCase_ ( A , A = True , A = "13a" , A = True ):
'''simple docstring'''
if lowercase:
_a : Dict = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a : Dict = sacrebleu.metrics.bleu._get_tokenizer(A )()(A )
else:
_a : List[Any] = sacrebleu.TOKENIZERS[tokenizer]()(A )
elif tokenizer == "moses":
_a : List[Any] = sacremoses.MosesTokenizer().tokenize(A , return_str=A , escape=A )
elif tokenizer == "penn":
_a : Dict = sacremoses.MosesTokenizer().penn_tokenize(A , return_str=A )
else:
_a : List[Any] = sentence
if not return_str:
_a : str = normalized_sent.split()
return normalized_sent
def compute_sari(sources, predictions, references):
    """Corpus-level SARI (0-100) averaged over all (source, prediction, references) triples.

    Fixes: the three parameters shared the name ``A`` (a SyntaxError); parameter names are
    fixed by the keyword call in the metric's ``_compute`` below.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(sources)
    return 100 * sari_score
def UpperCAmelCase_ ( A , A , A="exp" , A=None , A=False , A=False , A=False , ):
'''simple docstring'''
_a : List[str] = len(references[0] )
if any(len(A ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
_a : int = [[refs[i] for refs in references] for i in range(A )]
_a : Optional[int] = sacrebleu.corpus_bleu(
A , A , smooth_method=A , smooth_value=A , force=A , lowercase=A , use_effective_order=A , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a(datasets.Metric):
    """WikiSplit metric: combines SARI, SacreBLEU and exact-match over text pairs.

    Fixes: both methods were named ``__UpperCamelCase`` (the second silently overwrote the
    first) — ``datasets.Metric`` dispatches to ``_info``/``_compute`` — and ``_compute``'s
    three parameters shared one name (a SyntaxError); names restored from the keyword
    arguments used in the body.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        """Return ``{"sari": ..., "sacrebleu": ..., "exact": ...}`` for the given inputs."""
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 120 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
class a(PerceiverImageProcessor):
    """Deprecated alias kept for backward compatibility; use ``PerceiverImageProcessor``.

    Fixes: the base class was the undefined name ``snake_case__`` (the only image-processor
    import in this module is ``PerceiverImageProcessor``); the ``*``/``**`` varargs shared
    one name (a SyntaxError); and the warning call was missing its category argument
    (restored as ``FutureWarning``, the conventional category for deprecations).
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead." ,
            FutureWarning , )
        super().__init__(*args, **kwargs)
| 120 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case(ProcessorMixin):
    """Blip-style processor wrapping an image processor and a tokenizer into one callable.

    Fixes: undefined base class ``UpperCAmelCase`` (the only mixin imported here is
    ``ProcessorMixin``); three class attributes all named ``__magic_name__`` (each overwrote
    the previous — ``ProcessorMixin`` requires ``attributes``/``*_class`` names); duplicate
    parameter names in ``__init__`` and ``__call__`` (SyntaxErrors); and assignments bound
    to throwaway locals. Parameter names restored from the keyword arguments forwarded to
    the tokenizer.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        # Blip text models do not use token_type_ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare ``images`` and/or ``text`` for the model in a single call."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            # Merge the token features into the image features.
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # dict.fromkeys preserves order while removing duplicates.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 118 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 118 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class UpperCAmelCase_(unittest.TestCase):
    """Unit tests for ``get_activation``.

    Fixes: all four methods were named ``__UpperCAmelCase`` (duplicates overwrite each other
    and unittest only discovers ``test_*`` names); ``assertIsInstance`` was given the
    undefined name ``_lowerCAmelCase`` instead of the activation bound just above; and
    ``torch.floataa`` does not exist (restored as ``torch.float32``, consistent with the
    exact ``.item() == 20`` expectations).
    """

    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        # Mish decays more slowly than SiLU, hence the larger negative probe.
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 79 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical hub checkpoints and the location of their config files.
# Fix: the logger and this map were both assigned to the single name `lowerCamelCase`,
# so the dict silently clobbered the logger.
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}
class _lowerCamelCase(PretrainedConfig):
    """DistilBERT model configuration.

    Fixes: undefined base class ``UpperCamelCase_`` (the only config base imported here is
    ``PretrainedConfig``); both class attributes were named ``SCREAMING_SNAKE_CASE_`` (the
    second overwrote the first — ``PretrainedConfig`` requires ``model_type`` and
    ``attribute_map``); all ``__init__`` parameters shared one name (a SyntaxError); and
    the settings were stored into dead locals instead of ``self``. Parameter names and
    defaults restored from the attribute reads in the body.
    """

    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class _lowerCamelCase(OnnxConfig):
    """ONNX export configuration for DistilBERT.

    Fixes: undefined base class ``UpperCamelCase_`` (the ONNX base imported here is
    ``OnnxConfig``); the property must be named ``inputs`` (the abstract hook ``OnnxConfig``
    exporters read); and the axis mapping was bound to a dead local while the return
    statement read ``dynamic_axis``.

    NOTE(review): this class shadows the config class above, which shares the obfuscated
    name ``_lowerCamelCase`` — the two definitions should carry distinct names.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the exported graph inputs, depending on the task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 285 | 0 |
# Lazy-import wiring for the WavLM model (transformers-style __init__.py).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public names, consumed lazily by _LazyModule.
# Bug fix: the original bound this dict (and the modeling list, and the
# final _LazyModule) to throwaway names, so the lazy module never worked.
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: simply do not expose the modeling classes.
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Repository hygiene check: fail (non-zero exit) when any tracked file name
# contains uppercase letters, spaces, or hyphens, or sits outside a directory.
# Bug fix: the original bound every result to a throwaway mangled name while
# reading the real names (filepaths, upper_files, ...) — all NameErrors.
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    # Exit status = number of offending files, so CI fails when non-zero.
    sys.exit(bad_files)
"""simple docstring"""
from functools import lru_cache
def snake_case ( lowerCAmelCase_ ) -> set:
_snake_case = 2
_snake_case = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(lowerCAmelCase_ )
if n > 1:
factors.add(lowerCAmelCase_ )
return factors
@lru_cache
def snake_case ( lowerCAmelCase_ ) -> int:
return len(unique_prime_factors(lowerCAmelCase_ ) )
def snake_case ( lowerCAmelCase_ ) -> bool:
return len(set(lowerCAmelCase_ ) ) in (0, 1)
def snake_case ( lowerCAmelCase_ ) -> list:
_snake_case = 2
while True:
# Increment each value of a generated range
_snake_case = [base + i for i in range(lowerCAmelCase_ )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
_snake_case = [upf_len(lowerCAmelCase_ ) for x in group]
checker.append(lowerCAmelCase_ )
# If all numbers in the list are equal, return the group variable.
if equality(lowerCAmelCase_ ):
return group
# Increment our base variable by 1
base += 1
def snake_case ( lowerCAmelCase_ = 4 ) -> int:
_snake_case = run(lowerCAmelCase_ )
return results[0] if len(lowerCAmelCase_ ) else None
if __name__ == "__main__":
print(solution())
| 103 |
# Imports for the SEW checkpoint conversion script.
# Bug fix: the digit-mangled class names (WavaVeca*) do not exist in
# transformers; restored to the real Wav2Vec2* names.
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
# Bug fix: the logger was bound to a mangled name while the functions below
# read `logger`.
logger = logging.get_logger(__name__)
# fairseq state-dict key fragment -> Hugging Face SEW module path.
# "*" is later replaced by the encoder layer index.
# Bug fix: the dict was bound to a mangled name while the loader reads MAPPING.
MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy a fairseq tensor *value* into the HF module attribute addressed by dotted *key*.

    *weight_type* selects which parameter ("weight", "weight_g", "weight_v",
    "bias", or None for the module itself) receives the data.
    Bug fix: the original signature repeated one mangled parameter name
    (SyntaxError) and bound every local to a throwaway name.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Sanity-check shapes before overwriting the parameter.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor from the fairseq state dict into *hf_model* via MAPPING.

    Conv feature-extractor tensors are delegated to load_conv_layer(); anything
    that matches no mapping is collected and logged as unused.
    Bug fix: mangled duplicate parameter names and throwaway local bindings
    restored from the read sites.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Fine-tuned checkpoints nest the encoder under "sew." (except the LM head).
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor tensor into the HF feature extractor.

    The fairseq key encodes "conv_layers.<layer_id>.<type_id>...", where
    type_id 0 is the conv itself and type_id 2 a layer norm (only present for
    layer-norm extractors, or group norm on layer 0).
    Bug fix: mangled duplicate parameter names and throwaway locals restored.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """Derive a SEWConfig from a loaded fairseq *model*.

    Bug fix: every `config.<attr> = ...` assignment had its target mangled
    away; targets restored from the canonical SEW conversion script (the
    right-hand sides and their order match it exactly).
    """
    config = SEWConfig()
    if is_finetuned:
        # NOTE(review): attribute path digit-mangled in the original
        # ("wav_encoder.wav_model"); canonical fairseq layout is below.
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq SEW checkpoint into the Hugging Face format.

    Loads the fairseq model, derives/loads a SEWConfig, builds the feature
    extractor (and, for fine-tuned CTC checkpoints, a tokenizer + processor),
    copies the weights, and saves everything under *pytorch_dump_folder_path*.
    Bug fix: mangled duplicate parameter names and throwaway locals restored.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__snake_case = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 472 | 0 |
"""Tests for StableDiffusionLatentUpscalePipeline (diffusers)."""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    """Return True when every tensor in *tensor_list* has the same .shape.

    Vacuously True for an empty or single-element list.
    Bug fix: the original bound the shapes to a throwaway name (leaving
    `shapes` undefined) and was itself named with a mangled identifier
    while being called as `check_same_shape`.
    """
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the latent-upscale pipeline.

    Bug fix: every class attribute was bound to the same mangled name and every
    method shared one mangled name (later defs shadowed earlier ones), so no
    test actually existed. Names restored from the mixin contracts and bodies.
    """

    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        """A deterministic (1, 4, 16, 16) latent-style input tensor."""
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        """Build tiny pipeline components for fast tests."""
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Standard pipeline kwargs with a device-appropriate RNG."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        """End-to-end CPU inference against a pinned output slice."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        """Every supported Karras scheduler must produce same-shaped outputs."""
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against reference images.

    Bug fix: the original reused the fast-test class's mangled name (shadowing
    it entirely), shared one mangled name across methods, and referenced the
    nonexistent `torch.floataa` (digit-mangled torch.float16).
    """

    def tearDown(self):
        # Release VRAM between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        """Generate latents with SD v1-4, upscale them, compare to reference."""
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        """Upscale a reference 512px image and compare to the 1024px reference."""
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 721 |
# Shared pytest fixtures for the datasets test suite.
# Bug fix: `import sqlitea` (digit-mangled sqlite3) is not a real module.
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    """A small in-memory Dataset with sequence/class-label/nested features."""
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    """Materialize the `dataset` fixture as an Arrow cache file; return its path."""
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files

# Bug fix: constant was bound to a mangled name while the fixtures below
# read FILE_CONTENT.
FILE_CONTENT = """\
Text data.
Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    """Plain-text file containing FILE_CONTENT."""
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    """FILE_CONTENT compressed with bz2."""
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    """FILE_CONTENT compressed with gzip."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    """FILE_CONTENT compressed with lz4 (skipped when lz4 is unavailable)."""
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    """text_file archived as 7z (skipped when py7zr is unavailable)."""
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    """text_file archived as tar."""
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    """FILE_CONTENT compressed with lzma/xz."""
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    """text_file archived as zip."""
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    """FILE_CONTENT compressed with zstandard (skipped when unavailable)."""
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    """A small TMX (XML) file with five translation units."""
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    # NOTE(review): interior indentation of the dedent literal was lost in the
    # mangled source; reconstructed conventionally.
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
# Shared tabular test data. Bug fix: every constant was bound to the same
# mangled name; real names restored from the fixtures that read them.
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    """The column-oriented test data as a plain dict."""
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    """DATA_DICT_OF_LISTS materialized as an Arrow file; returns its path."""
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    """DATA written into a single-table SQLite database; returns its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    # closing() guarantees the connection is released even on failure.
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    """DATA written as dataset.csv; returns its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    """DATA written as dataset2.csv; returns its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    """The csv_path file recompressed with bz2; returns the new path."""
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='''session''' )
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCAmelCase__ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(csv_path, csva_path, tmp_path_factory):
    """Zip both CSVs under upper-cased ``.CSV`` arcnames and return the archive path.

    Fixed: duplicate mangled parameter names and undefined ``path``; the body
    already referenced ``csv_path``/``csva_path``, which pins the signature.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csva_path, arcname=os.path.basename(csva_path.replace(".csv", ".CSV")))
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(csv_path, csva_path, tmp_path_factory):
    """Zip both CSVs under a ``main_dir/`` prefix and return the archive path.

    Fixed: duplicate mangled parameter names (SyntaxError) and undefined locals.
    NOTE(review): parameter names reconstructed — confirm fixture wiring.
    """
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csva_path, arcname=os.path.join("main_dir", os.path.basename(csva_path)))
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory) -> str:
    """Write DATA to a Parquet file (col_1: string, col_2: int64, col_3: float64).

    Fixed: mangled ``pa.intaa()``/``pa.floataa()`` restored to the real
    ``pa.int64()``/``pa.float64()`` type factories, plus undefined locals.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        # Transpose the row-oriented DATA into the column-oriented dict pyarrow expects.
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory) -> str:
    """Dump ``{"data": DATA}`` to dataset.json and return its path.

    Fixed: undefined ``path``/``data`` and a ``json.dump`` call whose two
    arguments were both the fixture argument.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory) -> str:
    """Dump ``{"data": DATA_DICT_OF_LISTS}`` to dataset.json and return its path.

    Fixed: undefined ``path``/``data`` (mangled assignment targets).
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory) -> str:
    """Write DATA as one JSON object per line (dataset.jsonl) and return its path.

    Fixed: undefined ``path``/``item`` from mangled assignment targets.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory) -> str:
    """Write DATA as JSON Lines to a second file (dataset2.jsonl) and return its path.

    Fixed: undefined ``path``/``item`` from mangled assignment targets.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory) -> str:
    """Write DATA_312 as JSON Lines (dataset_312.jsonl) and return its path.

    Fixed: undefined ``path``/``item`` from mangled assignment targets.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory) -> str:
    """Write DATA_STR as JSON Lines (dataset-str.jsonl) and return its path.

    Fixed: undefined ``path``/``item`` from mangled assignment targets.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(text_path, tmp_path_factory) -> str:
    """Gzip the plain-text fixture and return the .txt.gz path.

    Fixed: duplicate mangled parameter names (SyntaxError) and undefined ``path``.
    NOTE(review): parameter names reconstructed from the target filename —
    confirm fixture wiring.
    """
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            # writelines streams the raw chunks without decoding.
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(jsonl_path, tmp_path_factory) -> str:
    """Gzip the JSON Lines fixture and return the .jsonl.gz path.

    Fixed: duplicate mangled parameter names (SyntaxError) and undefined ``path``.
    NOTE(review): parameter names reconstructed — confirm fixture wiring.
    """
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(jsonl_path, jsonla_path, tmp_path_factory):
    """Zip both JSON Lines fixtures into dataset.jsonl.zip and return the archive path.

    Fixed: duplicate mangled parameter names (SyntaxError) and undefined locals.
    NOTE(review): parameter names reconstructed — confirm fixture wiring.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonla_path, arcname=os.path.basename(jsonla_path))
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(zip_jsonl_path, jsonl_path, jsonla_path, tmp_path_factory):
    """Zip one archive under a ``nested/`` prefix and return the new archive path.

    Fixed: four duplicate mangled parameter names (SyntaxError) and undefined
    locals. NOTE(review): only one file is written into the archive; which of
    the four fixtures it was is not recoverable from this view — ``zip_jsonl_path``
    is assumed. Confirm against the original conftest.
    """
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(jsonl_path, jsonla_path, tmp_path_factory):
    """Zip both JSON Lines fixtures under ``main_dir/`` and return the archive path.

    Fixed: duplicate mangled parameter names (SyntaxError) and undefined locals.
    NOTE(review): parameter names reconstructed — confirm fixture wiring.
    """
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonla_path, arcname=os.path.join("main_dir", os.path.basename(jsonla_path)))
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(jsonl_path, jsonla_path, tmp_path_factory):
    """Tar both JSON Lines fixtures into dataset.jsonl.tar and return the archive path.

    Fixed: duplicate mangled parameter names (SyntaxError) and undefined locals.
    NOTE(review): parameter names reconstructed — confirm fixture wiring.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonla_path, arcname=os.path.basename(jsonla_path))
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tar_jsonl_path, jsonl_path, jsonla_path, tmp_path_factory):
    """Tar one archive under a ``nested/`` prefix and return the new archive path.

    Fixed: four duplicate mangled parameter names (SyntaxError) and undefined
    locals. NOTE(review): which fixture is added is not recoverable from this
    view — ``tar_jsonl_path`` is assumed; confirm against the original conftest.
    """
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory) -> str:
    """Write four numbered lines to dataset.txt and return its path.

    Fixed: undefined ``data``/``path``/``item`` from mangled assignment targets.
    """
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory) -> str:
    """Write four numbered lines to a second text file (dataset2.txt) and return its path.

    Fixed: undefined ``data``/``path``/``item`` from mangled assignment targets.
    """
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory):
    """Write four numbered lines to a file with an unsupported extension (dataset.abc).

    Returns the ``Path`` object (this fixture deliberately does not ``str()`` it,
    matching the original). Fixed: undefined ``data``/``path``/``item``.
    """
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(text_path, texta_path, tmp_path_factory):
    """Zip both text fixtures into dataset.text.zip and return the archive path.

    Fixed: duplicate mangled parameter names (SyntaxError) and undefined locals.
    NOTE(review): parameter names reconstructed — confirm fixture wiring.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(texta_path, arcname=os.path.basename(texta_path))
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(text_path, texta_path, tmp_path_factory):
    """Zip both text fixtures under ``main_dir/`` and return the archive path.

    Fixed: duplicate mangled parameter names (SyntaxError) and undefined locals.
    NOTE(review): parameter names reconstructed — confirm fixture wiring.
    """
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(texta_path, arcname=os.path.join("main_dir", os.path.basename(texta_path)))
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(text_path, texta_path, tmp_path_factory):
    """Zip both text fixtures under deliberately unsupported ``.ext`` arcnames.

    Fixed: duplicate mangled parameter names (SyntaxError) and undefined locals.
    NOTE(review): parameter names reconstructed — confirm fixture wiring.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(texta_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory) -> str:
    """Write a text file containing a U+2029 (paragraph separator) line and return its path.

    Fixed: undefined ``text``/``path`` from mangled assignment targets.
    """
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase():
    """Return the repo-relative path of the checked-in RGB test image (nothing is created)."""
    parts = ("tests", "features", "data", "test_image_rgb.jpg")
    return os.path.join(*parts)
@pytest.fixture(scope="session")
def __lowerCAmelCase():
    """Return the repo-relative path of the checked-in 44.1 kHz WAV test file (nothing is created)."""
    parts = ("tests", "features", "data", "test_audio_44100.wav")
    return os.path.join(*parts)
@pytest.fixture(scope="session")
def __lowerCAmelCase(image_file, tmp_path_factory):
    """Zip the test image twice (second copy renamed ``*2.jpg``) and return the archive path.

    Fixed: duplicate mangled parameter names (SyntaxError) and undefined locals.
    NOTE(review): parameter names reconstructed — confirm fixture wiring.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def __lowerCAmelCase(tmp_path_factory):
    """Build a data directory with visible and hidden files/subdirectories.

    Layout: ``subdir/{train,test,.test}.txt`` plus a hidden ``.subdir/`` with
    train/test files, used to exercise hidden-file filtering.
    Fixed: ``data_dir`` was assigned to a throwaway name while every following
    line read the undefined ``data_dir``.
    """
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)

    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
| 470 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Map of submodule name -> list of public symbols, consumed by _LazyModule below.
# Fixed: the mangled version assigned every piece to a single throwaway name and
# never populated ``_import_structure`` nor installed the lazy module, so the
# final _LazyModule call referenced an undefined name and had no effect.
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the lazy module
    # below resolves attributes on demand.
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so optional heavy backends are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | """simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """Builds a tiny ViTMSN config plus dummy inputs and runs shape checks.

    NOTE(review): renamed from the mangled ``UpperCAmelCase__`` because the
    test case below instantiates ``ViTMSNModelTester(self)``; method, parameter
    and local names are reconstructed from their call sites (the originals were
    duplicate ``_SCREAMING_SNAKE_CASE`` parameters — a SyntaxError — and every
    method was named ``A``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None unless use_labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # Fixed: these debug prints contained interpolation braces but no f-prefix.
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline test-suite bindings for ViTMSN.

    NOTE(review): the mangled original inherited from the undefined
    ``__lowerCamelCase`` and named every attribute/method with placeholder
    names; bases, mixin-read attributes (``all_model_classes``, ``test_*``
    flags) and ``test_*`` method names are reconstructed from the imports at
    the top of the file and from unittest conventions — confirm flag order
    against the canonical test file.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        # Fixed: the tester/config-tester were assigned to locals instead of
        # the ``self.model_tester``/``self.config_tester`` read by every test.
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats test image used by the integration test below.

    NOTE(review): renamed from the mangled ``lowerCAmelCase_`` — the
    integration test calls ``prepare_img()``; also fixed the body, which
    returned the undefined name ``image``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase__(unittest.TestCase):
    """Slow integration test: ViT-MSN small checkpoint on the COCO test image.

    NOTE(review): property, method and local names reconstructed from call
    sites — the mangled original assigned every value to ``a_`` and then read
    the original (undefined) names.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 473 | 0 |
"""Tests for the ``check_dummies`` repo utility (dummy-object generation)."""
import os
import sys
import unittest


# Fixed: the repo root was assigned to a throwaway name while the next line
# read the undefined ``git_repo_path``.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
# NOTE(review): restored as an attribute assignment on the imported module —
# the comment above only makes sense if ``check_dummies.PATH_TO_TRANSFORMERS``
# is overridden; confirm against the canonical test file.
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

# Templates mirroring the dummy objects emitted by create_dummy_object.
# NOTE(review): the dump stripped blank lines inside these literals; indents
# restored to the 4-space bodies create_dummy_object produces.
DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
class __snake_case(unittest.TestCase):
    """Unit tests for find_backend / read_init / create_dummy_object / create_dummy_files.

    NOTE(review): method, argument and local names reconstructed — the mangled
    original read undefined placeholder names and gave every method the same
    name; the blank lines inside the expected multiline strings (stripped by
    the dump) are restored to match the generator's real output.
    """

    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 712 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size caps for the MRPC example. Restored names: the mangled constants
# were never referenced, while the training code below reads MAX_GPU_BATCH_SIZE.
# NOTE(review): EVAL_BATCH_SIZE is the upstream accelerate example's name for
# the second constant — confirm against the canonical cross_validation.py.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Build train/eval/test DataLoaders for one GLUE-MRPC cross-validation fold.

    Args:
        accelerator: used to synchronise preprocessing across processes.
        dataset: the raw GLUE MRPC splits.
        train_idxs / valid_idxs: row indices of ``dataset["train"]`` for this fold.
        batch_size: batch size for all three loaders.

    NOTE(review): renamed from the mangled ``_UpperCAmelCase`` — the caller in
    this file invokes ``get_fold_dataloaders``; every local was assigned to a
    throwaway name while later lines read the undefined original names
    (``tokenizer``, ``datasets``, ``tokenized_datasets``, the loaders).
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders. NOTE(review): the upstream example uses
    # EVAL_BATCH_SIZE for the eval/test loaders; the mangled call sites do not
    # preserve that, so batch_size is used uniformly here — confirm upstream.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    """
    Run k-fold cross-validated fine-tuning of BERT on GLUE/MRPC with Accelerate.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace providing ``cpu``, ``mixed_precision`` and
            ``num_folds``.
    """
    # New Code #
    # Test-set references are identical across folds, so they are collected
    # only on the first fold (see `if i == 0` below).
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    # NOTE(review): MAX_GPU_BATCH_SIZE is a module-level constant defined
    # outside this view — confirm it exists at the top of the file.
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
        # optimizer creation otherwise training will not work on TPU.
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything.
        # There is no specific order to remember, we just need to unpack the objects
        # in the same order we gave them to the prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test references only once
                test_references.append(references.cpu())
        test_predictions.append(torch.cat(fold_predictions, dim=0))

    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    # average the fold logits, then take the argmax as the ensembled prediction.
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    """CLI entry point: parse arguments and launch k-fold training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
# Script entry point.
# NOTE(review): expects `main` to be the argparse entry point defined above
# (currently obfuscated as `_UpperCAmelCase`) — confirm the definition name.
if __name__ == "__main__":
    main()
| 196 | 0 |
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    """
    Return the strongly connected components of a directed graph using
    Tarjan's algorithm.

    ``g`` is an adjacency list: ``g[v]`` lists the vertices reachable from
    ``v`` by one edge.  Components are returned in reverse topological order.
    """
    n_vertices = len(g)
    stack = deque()
    on_stack = [False for _ in range(n_vertices)]
    index_of = [-1 for _ in range(n_vertices)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            # v is the root of a strongly connected component: pop it off.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n_vertices):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components
def create_graph(n_vertices: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build an adjacency list for ``n_vertices`` vertices from ``(u, v)`` edges."""
    g: list[list[int]] = [[] for _ in range(n_vertices)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
    # Self-test: build a 7-vertex example graph and check that Tarjan's
    # algorithm recovers the expected strongly connected components.
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 385 |
from collections.abc import Iterable
from typing import Any
class Node:
    """A binary-search-tree node: a value plus parent/left/right links."""

    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        """Leaf nodes print as their value; inner nodes as a nested mapping."""
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    """A binary search tree keyed on ``Node.value``, with parent links for deletion."""

    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        """Delegate printing to the root node's recursive repr."""
        return str(self.root)

    def __reassign_nodes(self, node, new_children):
        """Splice ``new_children`` into ``node``'s position (used by remove)."""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        """Return True if ``node`` is the right child of its parent."""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        """True when the tree holds no nodes."""
        return self.root is None

    def __insert(self, value):
        """Insert one value, walking from the root down to a free leaf slot."""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        """Insert every given value into the tree."""
        for value in values:
            self.__insert(value)

    def search(self, value):
        """Return the node holding ``value`` (or None); IndexError on an empty tree."""
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType attribute errors
            # fixed: compare values with != instead of the identity test
            # `is not`, which only happens to work for interned small ints.
            while node is not None and node.value != value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        """Return the right-most (largest) node of the subtree rooted at ``node``."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        while node.right is not None:
            node = node.right
        return node

    def get_min(self, node=None):
        """Return the left-most (smallest) node of the subtree rooted at ``node``."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        # fixed: a caller-supplied ``node`` is no longer clobbered by the root.
        while node.left is not None:
            node = node.left
        return node

    def remove(self, value):
        """Delete the node holding ``value``; no-op when the value is absent."""
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                # Two children: replace with the max of the left branch to
                # keep the BST ordering intact.
                tmp_node = self.get_max(node.left)
                self.remove(tmp_node.value)
                node.value = tmp_node.value

    def preorder_traverse(self, node):
        """Yield nodes in preorder (node, left subtree, right subtree)."""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        """Traverse with ``traversal_function(root)``; defaults to preorder."""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node):
        """Append the subtree's values to ``arr`` in sorted (inorder) order."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k, node):
        """Return the k-th smallest value (1-based) in the subtree rooted at ``node``."""
        arr: list = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node):
    """
    Return the nodes of the (sub)tree rooted at ``curr_node`` in postorder
    (left subtree, right subtree, then the node itself); ``None`` yields [].
    """
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree_example() -> None:
    """
    Demonstration helper: build a small BST, exercise search/min/max, then
    delete every value, printing the tree as it shrinks.
    """
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the tree (root node repr)
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
# Run the module's doctests (verbose) when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 385 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


# Declare the public import structure up front; torch-only symbols are added
# below when torch is available.
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the modeling symbols under their module key instead of
    # overwriting the structure dict.
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 681 |
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """
    Return True if the integer ``num`` reads the same forwards and backwards.

    Negative numbers are never palindromes (the sign has no mirror image).
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        # shift the reversed value left one decimal digit and append the next digit
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 681 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for RoCBert (slow tokenizer only; no rust counterpart)."""

    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        """Write a tiny vocab plus matching shape/pronunciation maps to temp files."""
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        # RoCBert maps each token to a shape id and a pronunciation id; this
        # toy fixture reuses the vocab index for both.
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # accent stripping defaults to on when lower-casing
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 284 |
"""simple docstring"""
def palindromic_string(input_string: str) -> str:
    """
    Return the longest palindromic substring of ``input_string`` using
    Manacher's algorithm (O(n)).

    The input is interleaved with '|' separators so even- and odd-length
    palindromes can be handled with one center/length array.
    """
    if not input_string:
        # guard: ``input_string[-1]`` below would raise IndexError on ""
        return ""

    max_length = 0
    new_input_string = ""
    output_string = ""

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    for ch in input_string[: len(input_string) - 1]:
        new_input_string += ch + "|"
    # append last character
    new_input_string += input_string[-1]

    # l and r bound the previously found palindromic substring that ends
    # furthest to the right
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for _ in range(len(new_input_string))]
    start = 0

    # for each character in new_input_string find the corresponding palindrome
    for j in range(len(new_input_string)):
        # reuse mirrored information from inside the current [l, r] window
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # rebuild the answer from the transformed string, dropping '|' separators
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for ch in s:
        if ch != "|":
            output_string += ch
    return output_string
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 102 | 0 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
# Default HTTP headers for the Google Images request: spoof a desktop browser
# User-Agent so Google serves the full search-results page.
# NOTE(review): obfuscated module name — the request code below presumably
# expects this dict (e.g. as `headers`); confirm the intended name.
a_ = {
    """User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
    """ (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """
    Download up to ``max_images`` full-resolution images for ``query`` from a
    Google Images search into ``./query_<query>/``.

    Returns the number of images downloaded (0 when the page layout did not
    match the expected structure).
    """
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    # `a_` is the module-level browser-headers dict defined above.
    html = requests.get("https://www.google.com/search", params=params, headers=a_)

    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    # round-trip through JSON to unescape the embedded payload
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    # drop the low-resolution thumbnail entries so only full-size URLs remain
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )

    downloaded = 0
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # the URLs are double-escaped in the page source; decode twice
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
        downloaded = index + 1
    # fixed: return the count even when the loop ends without hitting the cap
    # (the original returned an undefined/off-by-one index here)
    return downloaded
if __name__ == "__main__":
    try:
        # fixed: bind the result to the name the message below actually prints
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
| 720 |
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial-division primality test; numbers below 2 are not prime."""
    if number < 2:
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Count the primes below ``max_prime`` that are a difference of two
    consecutive cubes: (k+1)**3 - k**3 = 3*k**2 + 3*k + 1.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # (1+1)**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        # consecutive cube differences grow by 6*(k+1) at each step
        prime_candidate += 6 * cube_index
    return primes_count
return primes_count
# Print the Project Euler answer when run as a script.
# NOTE(review): relies on `solution` being defined above (currently obfuscated
# as `__UpperCAmelCase`) — confirm the definition name.
if __name__ == "__main__":
    print(F"{solution() = }")
| 523 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
# Module-level logger for this (deprecated) feature extractor.
# NOTE(review): obfuscated name — code later in the file presumably expects
# `logger`; confirm against the rest of the module (outside this view).
__magic_name__ : Tuple = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Feature extractor for M-CTC-T: converts raw mono speech into log-mel
    spectrogram features (``input_features``) with an optional
    ``attention_mask``, and optionally normalizes each utterance.

    NOTE(review): the original identifiers in this block were destroyed
    (duplicate ``_A`` parameters, every method named ``_a``); names were
    restored from the canonical deprecated MCTCT feature extractor — verify
    against the upstream module.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # window/hop lengths are given in milliseconds; convert to samples
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract mel-frequency spectral coefficients for one (unbatched) waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        # (freq, time) -> (time, freq)
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Mean/variance-normalize one feature matrix over its valid frames."""
        # statistics are computed over the un-padded prefix only
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        """Normalize a batch of feature matrices, using the mask to find valid lengths."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        """
        Featurize one waveform or a batch of waveforms and pad to a common length.

        Raises:
            ValueError: if `sampling_rate` disagrees with the extractor's rate,
                or the input has more than one channel.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            # only pass a mask when real padding was applied
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 102 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of X-MOD checkpoint ids to their config URLs. (Previously both the
# logger and this map were bound to the same name, clobbering the logger.)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    r"""
    Configuration for X-MOD models: standard RoBERTa-style architecture sizes
    plus the X-MOD language-adapter settings (reduction factor, layer norms,
    per-language adapters).

    NOTE(review): parameter names were destroyed in this block (all duplicated);
    they are restored here from the attribute names the original body assigned.
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # keep a concrete list so the config serializes deterministically
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    """ONNX export configuration for X-MOD models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks carry an extra `choice` dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 7 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure for the conditional_detr model package. Each optional
# backend guard adds its submodule exports only when that backend is present.
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; previously the proxy was bound to
    # a throwaway name and _import_structure was never defined at all.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151 |
def UpperCamelCase_(__a) -> int:
    """
    Return the largest number obtainable by deleting exactly one digit from
    the decimal representation of ``abs(__a)``.

    Raises:
        TypeError: if ``__a`` is not an int.
    """
    if not isinstance(__a, int):
        # Bug fix: the second argument of isinstance must be a type; the old
        # code passed the value itself, which raised for every input.
        raise TypeError("only integers accepted as input")
    num_string = str(abs(__a))
    # One copy of the digit list per position; copy i gets digit i removed.
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    # Bug fix: join each transposition; the old code re-joined the original
    # string every time, so no digit was ever actually removed.
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    __import__("doctest").testmod()
| 151 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Wraps a Chinese-CLIP image processor and a BERT tokenizer into a single
    processor that offers both text and image preprocessing.

    NOTE(review): class/attribute/method names were destroyed in this block
    (all bound to one placeholder); restored from the canonical processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # back-compat: fall back to the deprecated feature_extractor kwarg
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """
        Tokenize `text` and/or preprocess `images`; when both are given the
        pixel values are attached to the text encoding.

        Raises:
            ValueError: if neither `text` nor `images` is provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Bug fix: pixel values must be stored on the returned encoding;
            # previously they were assigned to a throwaway local and dropped.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # union of the two components' input names, deduplicated in order
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
| 620 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI works.

    If a `default_config.yaml` file is located in the cache it is temporarily
    moved aside for the duration of the tests.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # hide any user default config so tests run from a clean slate
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # restore the user's default config
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class _UpperCAmelCase ( unittest.TestCase ):
__lowerCamelCase: Tuple = 'test-tpu'
__lowerCamelCase: Dict = 'us-central1-a'
__lowerCamelCase: List[str] = 'ls'
__lowerCamelCase: Optional[int] = ['accelerate', 'tpu-config']
__lowerCamelCase: Optional[Any] = 'cd /usr/share'
__lowerCamelCase: Any = 'tests/test_samples/test_command_file.sh'
__lowerCamelCase: Tuple = 'Running gcloud compute tpus tpu-vm ssh'
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : str = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a , )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Optional[int] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : Union[str, Any] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a , )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a , )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Optional[Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=a , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a , )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Optional[Any] = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=a , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a , )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : List[Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a , )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Union[str, Any] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a , )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Optional[Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=a , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a , )
| 620 | 1 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """
    Return the optimal value for the player at the root of a full binary
    game tree whose leaves hold `scores` (height = log2(len(scores))).

    Maximizing and minimizing levels alternate: `is_max` flips on every
    recursive step down the tree.

    Raises:
        ValueError: if `depth` is negative or `scores` is empty.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        # reached a leaf: return its score
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def main() -> None:
    """Demo: run minimax on a small sample tree and print the optimal value."""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 166 |
from collections import deque
class Process:
    """A schedulable process with bookkeeping for MLFQ statistics."""

    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in a ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """
    Multi level feedback queue scheduler.
    https://en.wikipedia.org/wiki/Multilevel_feedback_queue

    Queues 0 .. N-2 run round robin with their own time slice; the last
    queue (N-1) runs first-come-first-served.
    """

    def __init__(
        self,
        number_of_queues: int,
        time_slices: "list[int]",
        queue: "deque[Process]",
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that the round robin algorithm is applied to
        self.time_slices = time_slices
        # unfinished processes live in this ready queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished processes end up here, in completion order
        self.finish_queue: "deque[Process]" = deque()

    def calculate_sequence_of_finish_queue(self) -> "list[str]":
        """Return the names of finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: "list[Process]") -> "list[int]":
        """Return the accumulated waiting time of each process in `queue`."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: "list[Process]") -> "list[int]":
        """Return the turnaround time of each process in `queue`."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: "list[Process]") -> "list[int]":
        """Return the completion (stop) time of each process in `queue`."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: "deque[Process]") -> "list[int]":
        """Return the remaining burst time of each process in `queue`."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: "Process") -> int:
        """Add the time the process sat in a ready queue since it last stopped."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: "deque[Process]") -> "deque[Process]":
        """Run every remaining process to completion in FIFO order."""
        finished: "deque[Process]" = deque()  # completion order of this pass
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if the process arrives later than the current time, advance the clock
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # run the process to completion
            self.current_time += cp.burst_time
            cp.burst_time = 0
            cp.turnaround_time = self.current_time - cp.arrival_time
            cp.stop_time = self.current_time
            finished.append(cp)

        self.finish_queue.extend(finished)  # record finished processes
        # FCFS finishes every remaining process
        return finished

    def round_robin(
        self, ready_queue: "deque[Process]", time_slice: int
    ) -> "tuple[deque[Process], deque[Process]]":
        """Give each queued process one `time_slice`; unfinished ones re-queue."""
        finished: "deque[Process]" = deque()  # processes completed this cycle
        # one cycle only: unfinished processes go back to the ready queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if the process arrives later than the current time, advance the clock
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of the process
            self.update_waiting_time(cp)
            if cp.burst_time > time_slice:
                # use the CPU for the time slice only, then requeue
                self.current_time += time_slice
                cp.burst_time -= time_slice
                cp.stop_time = self.current_time
                ready_queue.append(cp)
            else:
                # process finishes within its slice
                self.current_time += cp.burst_time
                cp.burst_time = 0
                cp.stop_time = self.current_time
                cp.turnaround_time = self.current_time - cp.arrival_time
                finished.append(cp)

        self.finish_queue.extend(finished)  # record finished processes
        # return finished processes and the remaining ready queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> "deque[Process]":
        """Run the full MLFQ schedule and return the finish queue."""
        # every queue except the last runs round robin with its time slice;
        # the ready queue must be rebound so later levels see the leftovers
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue runs first-come-first-served
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    # Demo / doctest driver for the MLFQ scheduler. (Previously all four
    # processes were bound to a single name and the deques referenced an
    # undefined `Pa`, so the script could not run.)
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
| 166 | 1 |
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    # Placeholder raised-on-use object for when the `speech` backend is absent.
    # NOTE(review): the original class name was destroyed; restored from the
    # canonical dummy speech objects module — verify against upstream.
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
class Speech2TextFeatureExtractor(metaclass=DummyObject):
    # Placeholder raised-on-use object for when the `speech` backend is absent.
    # NOTE(review): the original class name was destroyed; restored from the
    # canonical dummy speech objects module — verify against upstream.
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 55 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    """Task template mapping a dataset's columns to the image-classification schema."""

    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel."""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # frozen dataclass: mutate through __dict__ to bypass the frozen guard
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        # dataset column name -> canonical task column name
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 55 | 1 |
'''simple docstring'''
from typing import List
import numpy as np
def _a ( lowerCamelCase_ ):
snake_case : Tuple ={key: len(snake_case_ ) for key, value in gen_kwargs.items() if isinstance(snake_case_ , snake_case_ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
snake_case : List[Any] =max(lists_lengths.values() , default=0 )
return max(1 , snake_case_ )
def _a ( lowerCamelCase_ , lowerCamelCase_ ):
snake_case : List[Any] =[]
for group_idx in range(snake_case_ ):
snake_case : int =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
snake_case : str =shards_indices_per_group[-1].stop if shards_indices_per_group else 0
snake_case : List[Any] =range(snake_case_ , start + num_shards_to_add )
shards_indices_per_group.append(snake_case_ )
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split `gen_kwargs` into at most `max_num_jobs` gen_kwargs, sharding every list."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _a ( lowerCamelCase_ ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , snake_case_ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _a ( lowerCamelCase_ , lowerCamelCase_ ):
snake_case : Tuple ={len(snake_case_ ) for value in gen_kwargs.values() if isinstance(snake_case_ , snake_case_ )}
snake_case : Tuple ={}
for size in list_sizes:
snake_case : int =list(range(snake_case_ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
snake_case : int =dict(snake_case_ )
for key, value in shuffled_kwargs.items():
if isinstance(snake_case_ , snake_case_ ):
snake_case : List[Any] =[value[i] for i in indices_per_size[len(snake_case_ )]]
return shuffled_kwargs
| 719 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# Maps each model type to the name of its feature extractor class.
# Fixes: all three module globals below were bound to the single name `A`
# (each clobbering the previous one), while the rest of this file references
# `logger`, `FEATURE_EXTRACTOR_MAPPING_NAMES` and `FEATURE_EXTRACTOR_MAPPING`.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

# Lazy config-class -> feature-extractor-class mapping built from the names above.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)

# Backward-compatible alias: `A` was the (last surviving) name these globals
# were published under before the rename.
A = FEATURE_EXTRACTOR_MAPPING
def _a(class_name):
    """Resolve a feature extractor class from its class name.

    Lookup order: the per-model-type name mapping, then extractors registered
    at runtime, then the top-level ``transformers`` module (which exposes dummy
    objects that raise a helpful error when a dependency is missing).
    Returns ``None`` when the name cannot be resolved at all.

    Fixes: the original parameter was named ``lowerCamelCase_`` while the body
    compared against the undefined ``class_name``.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                # The submodule may not expose the class (missing optional dep);
                # keep scanning the remaining model types.
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def _a(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the feature extractor configuration dict from a repo or local directory.

    Returns ``{}`` when the checkpoint has no feature extractor config file, so
    callers can fall back to the model config.

    Fixes: the original signature declared ``lowerCamelCase_`` eight times (a
    SyntaxError); parameter names are restored from the keyword arguments the
    body forwards to ``get_file_from_repo``.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class lowerCAmelCase_:
    """AutoFeatureExtractor-style factory.

    Never instantiate directly; use :meth:`from_pretrained`.

    Fixes: the original class decorated a method with the undefined name
    ``_snake_case`` (NameError at import time) and defined *two* methods named
    ``__snake_case``, so the ``from_pretrained`` classmethod was shadowed by the
    ``register`` staticmethod. Method names are restored from the error message
    below, which tells users to call ``from_pretrained``.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the matching feature extractor class for a checkpoint.

        Resolution order: the feature extractor config file, then the model
        config, then (with ``trust_remote_code``) a class fetched from the hub,
        then the static ``FEATURE_EXTRACTOR_MAPPING``.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        # Mark the call as coming from an Auto* class (matches upstream
        # transformers; the original assignment's target was destroyed).
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            # `code_revision` only applies to the dynamic-module fetch above.
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 136 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's CLI: ``--num_cores``, the training script path, and its args.

    Fixes: the function was named ``a`` although the call site below invokes
    ``parse_args()``, and ``type=__lowerCamelCase`` / ``nargs=__lowerCamelCase``
    referenced undefined names (restored to ``int`` / ``str`` / ``REMAINDER``).
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Entry point: parse the CLI and spawn the training script on the TPU cores.

    Fixes: this function and ``parse_args`` above were both named ``a`` (the
    second shadowing the first), and the ``__main__`` guard called the
    undefined name ``main``.
    """
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own args plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 291 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCAmelCase__ = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
UpperCAmelCase__ = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
UpperCAmelCase__ = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... 
["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase__(datasets.Metric):
    """TER (Translation Edit Rate) metric backed by sacrebleu.

    Fixes: the original defined two methods both named ``lowercase`` (the
    second shadowed the first) and ``_compute`` declared the same parameter
    name four times (a SyntaxError). Method and parameter names are restored
    to the ``datasets.Metric`` convention.
    NOTE(review): ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` / ``_CITATION``
    are assigned to the single name ``UpperCAmelCase__`` at module level —
    those constants need the matching rename for this class to import.
    """

    def _info(self):
        # sacrebleu only ships the TER implementation from 1.4.12 onwards.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """Score ``predictions`` against ``references`` and return TER statistics."""
        # Every prediction must come with the same number of references.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects references transposed: one stream per reference index.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 224 | 0 |
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Tuple , lowerCamelCase__ :int , lowerCamelCase__ :Dict=None , lowerCamelCase__ :str=None ):
UpperCamelCase__ :List[str] = data
UpperCamelCase__ :Dict = previous
UpperCamelCase__ :str = next_node
def __str__( self :Dict ):
return f"""{self.data}"""
def __a ( self :Dict ):
return self.data
def __a ( self :Union[str, Any] ):
return self.next
def __a ( self :Union[str, Any] ):
return self.previous
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Tuple , lowerCamelCase__ :Union[str, Any] ):
UpperCamelCase__ :Union[str, Any] = head
def __iter__( self :Optional[int] ):
return self
def __a ( self :List[Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase__ :List[Any] = self.current.get_data()
UpperCamelCase__ :int = self.current.get_next()
return value
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = None # First node in list
UpperCamelCase__ :Union[str, Any] = None # Last node in list
def __str__( self :str ):
UpperCamelCase__ :Optional[int] = self.head
UpperCamelCase__ :Optional[Any] = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase__ :int = current.get_next()
return " ".join(str(lowerCamelCase__ ) for node in nodes )
def __contains__( self :int , lowerCamelCase__ :int ):
UpperCamelCase__ :Union[str, Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase__ :Tuple = current.get_next()
return False
def __iter__( self :Any ):
return LinkedListIterator(self.head )
def __a ( self :Dict ):
if self.head:
return self.head.get_data()
return None
def __a ( self :Union[str, Any] ):
if self.tail:
return self.tail.get_data()
return None
def __a ( self :Tuple , lowerCamelCase__ :Node ):
if self.head is None:
UpperCamelCase__ :List[str] = node
UpperCamelCase__ :str = node
else:
self.insert_before_node(self.head , lowerCamelCase__ )
def __a ( self :List[Any] , lowerCamelCase__ :Node ):
if self.head is None:
self.set_head(lowerCamelCase__ )
else:
self.insert_after_node(self.tail , lowerCamelCase__ )
def __a ( self :str , lowerCamelCase__ :int ):
UpperCamelCase__ :Any = Node(lowerCamelCase__ )
if self.head is None:
self.set_head(lowerCamelCase__ )
else:
self.set_tail(lowerCamelCase__ )
def __a ( self :str , lowerCamelCase__ :Node , lowerCamelCase__ :Node ):
UpperCamelCase__ :str = node
UpperCamelCase__ :Optional[int] = node.previous
if node.get_previous() is None:
UpperCamelCase__ :Union[str, Any] = node_to_insert
else:
UpperCamelCase__ :Optional[Any] = node_to_insert
UpperCamelCase__ :List[str] = node_to_insert
def __a ( self :Optional[int] , lowerCamelCase__ :Node , lowerCamelCase__ :Node ):
UpperCamelCase__ :Optional[Any] = node
UpperCamelCase__ :List[Any] = node.next
if node.get_next() is None:
UpperCamelCase__ :Tuple = node_to_insert
else:
UpperCamelCase__ :List[Any] = node_to_insert
UpperCamelCase__ :Any = node_to_insert
def __a ( self :Any , lowerCamelCase__ :int , lowerCamelCase__ :int ):
UpperCamelCase__ :str = 1
UpperCamelCase__ :Optional[Any] = Node(lowerCamelCase__ )
UpperCamelCase__ :str = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCamelCase__ , lowerCamelCase__ )
return
current_position += 1
UpperCamelCase__ :List[str] = node.next
self.insert_after_node(self.tail , lowerCamelCase__ )
def __a ( self :Union[str, Any] , lowerCamelCase__ :int ):
UpperCamelCase__ :int = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase__ :Dict = node.get_next()
raise Exception("""Node not found""" )
def __a ( self :Union[str, Any] , lowerCamelCase__ :int ):
if (node := self.get_node(lowerCamelCase__ )) is not None:
if node == self.head:
UpperCamelCase__ :Tuple = self.head.get_next()
if node == self.tail:
UpperCamelCase__ :Any = self.tail.get_previous()
self.remove_node_pointers(lowerCamelCase__ )
@staticmethod
def __a ( lowerCamelCase__ :Node ):
if node.get_next():
UpperCamelCase__ :Tuple = node.previous
if node.get_previous():
UpperCamelCase__ :Dict = node.next
UpperCamelCase__ :Optional[int] = None
UpperCamelCase__ :Tuple = None
def __a ( self :Optional[int] ):
return self.head is None
def A ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod() | 717 |
from ...configuration_utils import PretrainedConfig
# Map from TAPAS checkpoint name to the URL of its hosted config file.
# NOTE(review): the generic name `UpperCamelCase` looks like an obfuscated
# rename of a pretrained-config archive map — confirm before relying on it.
UpperCamelCase = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration class for TAPAS models.

    Fixes: the original base class was the undefined name ``lowercase``
    (``PretrainedConfig``, imported above, is the only plausible base), the
    ``__init__`` declared the same parameter name ~40 times (a SyntaxError),
    and every ``self.<attr> = <param>`` assignment had lost its target.
    Parameter names are restored in the order of the surviving default values
    and attribute type comments.
    """

    # `PretrainedConfig` subclasses identify themselves via `model_type`
    # (the original attribute name was obfuscated to `_snake_case`).
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # Accept JSON-roundtripped dicts whose int keys became strings.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
# Generic element type; the class below subscripts `Generic[T]`, so the
# TypeVar must be bound to the name `T` (the original bound it to
# `_lowerCAmelCase`, leaving `T` undefined).
T = TypeVar("T")
class SegmentTree(Generic[T]):
    """Iterative (bottom-up) segment tree over an arbitrary associative function.

    ``st[N:]`` holds the leaves, ``st[1:N]`` the internal nodes, ``st[0]`` is
    unused. Fixes: the class was named ``__magic_name__`` although the demo
    code below constructs ``SegmentTree``; all three methods shared the name
    ``SCREAMING_SNAKE_CASE`` (only the last survived); and ``build``/``update``
    assigned the combined value to a throwaway local instead of ``self.st[p]``.
    """

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: T | None = None
        self.N: int = len(arr)
        # Internal-node slots are filled by build(); leaves are the input array.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr  # type: ignore[list-item]
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Fill the internal nodes from the leaves, bottom-up."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set leaf ``p`` (0-indexed) to ``v`` and refresh all its ancestors."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Fold ``fn`` over the inclusive leaf range ``[l, r]``; None if empty."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    # leaf index -> replacement value, applied between the two test passes.
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every [i, j] query against a reduce() over the plain list.

        Fixes: the original named this function ``__snake_case`` while calling
        ``test_all_segments()``, iterated over the undefined ``_lowercase``,
        and built trees from the undefined name ``SegmentTree`` vs the class's
        obfuscated name.
        """
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 454 |
'''simple docstring'''
import sys
# The 1000-digit number from Project Euler problem 8.
# Fixes: this constant was bound to `UpperCamelCase_` although the function's
# default parameter references `N`, and the function was named
# `_lowerCAmelCase` although the __main__ guard calls `solution()`.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the largest product of 13 adjacent digits in the digit string ``n``.

    Project Euler problem 8.
    """
    largest_product = -sys.maxsize - 1
    # Slide a 13-digit window over the string.
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
# Script entry point: prints the result using an f-string "=" debug spec (3.8+).
if __name__ == "__main__":
    print(F"{solution() = }")
| 331 | 0 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# Module-level boolean flag; unused within this chunk.
# NOTE(review): obfuscated name — original purpose unclear; confirm before removing.
_UpperCAmelCase : Union[str, Any] = False
class lowerCAmelCase ( unittest.TestCase ):
    # Intentionally empty: placeholder test case with no fast tests implemented.
    pass
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    """Integration test for the VersatileDiffusion image-variation pipeline (slow, GPU).

    Fixes: the test method was named ``A_`` — unittest only discovers methods
    whose names start with ``test`` — and the body referenced the undefined
    name ``UpperCAmelCase`` where the device, input image and generator belong.
    """

    def test_inference_image_variations(self) -> None:
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5])
        # Loose tolerance: exact pixel values drift slightly across GPU/driver versions.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 188 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Sample inputs for the demo in the __main__ block below.
# Fixes: both tuples were bound to the single name `_UpperCAmelCase` (the
# second clobbered the first) while the demo references
# `test_data_odd` / `test_data_even`.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """A sorted-linked-list node: an integer payload plus the next link.

    Fixes: the class was named ``lowerCAmelCase`` although the list below
    constructs ``Node(i, self.head)``, and both dataclass fields had been
    replaced by ``UpperCAmelCase__ = 42`` class attributes — leaving the
    dataclass with no fields, so the two-argument constructor could not work.
    """

    data: int
    next_node: Node | None
class SortedLinkedList:
    """Singly linked list whose nodes are kept sorted ascending from the head.

    Fixes: the class was named ``lowerCAmelCase`` although ``merge_lists`` and
    the ``__main__`` block reference ``SortedLinkedList``, and ``__init__``
    assigned each new node to a throwaway local instead of ``self.head``.
    """

    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Inserting in descending order at the head yields an ascending list.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored integers in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def SCREAMING_SNAKE_CASE ( sll_one , sll_two ) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list.

    NOTE(review): the obfuscated source gave both parameters the same name
    (a SyntaxError) and constructed `SortedLinkedList`, which is not bound
    under that name in this module — the class is bound to `lowerCAmelCase`.
    """
    return lowerCAmelCase(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase : Optional[Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 188 | 1 |
"""simple docstring"""
def A__ ( __lowerCamelCase = 4_0_0_0_0_0_0 ):
    """Project Euler #2: sum of the even Fibonacci numbers up to a limit.

    Args:
        __lowerCamelCase: inclusive upper bound on the Fibonacci values
            (default 4,000,000).

    Returns:
        The sum of all even Fibonacci numbers <= the bound.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= __lowerCamelCase:
        if b % 2 == 0:
            # BUG FIX: the obfuscated version appended the *bound* instead of
            # the current Fibonacci value, and compared against an undefined
            # name `n`.
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(f'{solution() = }')
| 589 |
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCamelCase ,unittest.TestCase ):
    """Tokenization tests for LXMERT (WordPiece slow + fast tokenizers).

    NOTE(review): class attributes and method names restored from the
    TokenizerTesterMixin / unittest conventions and the in-class references
    (`self.tokenizer_class`, `self.test_rust_tokenizer`, `self.vocab_file`);
    the obfuscated source bound all four attributes to one name and renamed
    `setUp` so it never ran.
    """

    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        # BUG FIX: must be stored on `self` — the tests below read `self.vocab_file`.
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
| 589 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase (__snake_case ):
    r"""
    Processor that combines a LayoutLMv2 image processor and a LayoutXLM
    tokenizer into one callable: the image processor runs (optional) OCR,
    then the tokenizer turns words + boxes (+ labels) into model inputs.

    NOTE(review): parameter and attribute names were reconstructed from the
    body's own references (`boxes`, `word_labels`, `text_pair`, ...) and from
    the ProcessorMixin contract; the obfuscated source gave every parameter
    the same name, which is a SyntaxError in Python.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the deprecated kwarg as a fallback for the new one.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor, then the tokenizer, and return the merged
        encoding (token ids, boxes, attention mask and pixel values)."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        # forwarded to the underlying tokenizer
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwarded to the underlying tokenizer
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 348 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCamelCase (unittest.TestCase ):
    """Tests for the text2text-generation pipeline.

    NOTE(review): attribute and method names restored from the pipeline-test
    framework conventions (`model_mapping`/`tf_model_mapping`,
    `get_test_pipeline`/`run_pipeline_test`); the obfuscated source bound both
    mappings to one name and gave methods duplicate parameter names (a
    SyntaxError).
    """

    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 348 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
# NOTE(review): the obfuscated source bound all three module constants to the
# same name `A_`, so `logger`, `MAPPING` and `TOP_LEVEL_KEYS` — which the
# conversion functions below reference — were undefined.  Names restored from
# those reference sites.
logger = logging.get_logger(__name__)

# fairseq parameter path -> HF Wav2Vec2 parameter path (leading segment is
# added in load_wavaveca_layer unless the key is in TOP_LEVEL_KEYS).
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'adapter_layer': 'encoder.layers.*.adapter_layer',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
    'pooling_layer.linear': 'projector',
    'pooling_layer.projection': 'classifier',
}
# Keys that live at the top level of the HF model (no "wav2vec2." prefix).
TOP_LEVEL_KEYS = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
    'projector',
    'classifier',
]
def read_txt_into_dict(UpperCAmelCase__) -> str:
    """Parse a label file into ``{line_number: first_token}``.

    Blank lines are skipped (their line numbers leave gaps in the keys).

    NOTE(review): function name restored from the call site in the checkpoint
    converter below; the obfuscated source named every function `snake_case`
    and referenced undefined locals (`result`, `value`).
    """
    result = {}
    with open(UpperCAmelCase__, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type) -> List[str]:
    """Copy `value` into the HF module found by walking the dotted path `key`.

    `weight_type` selects which tensor of the target module to overwrite
    ("weight", "weight_g", "weight_v", "bias", or None for raw `.data`); if
    `full_name` matches PARAM_MAPPING, the adapter "param" path is used instead.

    NOTE(review): parameter names restored — the obfuscated source gave all
    five parameters the same name, which is a SyntaxError.
    """
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.'):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.'):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def rename_dict(key, value, full_name, weight_type, hf_dict) -> Any:
    """Store `value` under the fully-qualified HF key in `hf_dict`.

    Mirrors `set_recursively` but writes into a plain dict instead of a model;
    for non-`lm_head` adapter params the leading dimension is stripped.

    NOTE(review): parameter names restored — the obfuscated source gave all
    five parameters the same name, which is a SyntaxError.
    """
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'

    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
# Adapter-layer parameter renames (fairseq -> HF); name restored from the
# reference sites in set_recursively / rename_dict / load_wavaveca_layer.
PARAM_MAPPING = {
    'W_a': 'linear_1.weight',
    'W_b': 'linear_2.weight',
    'b_a': 'linear_1.bias',
    'b_b': 'linear_2.bias',
    'ln_W': 'norm.weight',
    'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None) -> Any:
    """Map one fairseq weight onto the HF model (or into `hf_dict`).

    Returns True when `name` matched an entry of MAPPING and was loaded.

    NOTE(review): function name restored from the call site in
    recursively_load_weights; the obfuscated source used duplicate parameter
    names (a SyntaxError) and lost the `mapped_key`/`weight_type` locals.
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split('.')[-2]
                mapped_key = mapped_key.replace('*', layer_index)
            if "weight_g" in name:
                weight_type = 'weight_g'
            elif "weight_v" in name:
                weight_type = 'weight_v'
            elif "bias" in name:
                weight_type = 'bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless) -> List[str]:
    """Walk the fairseq state dict and load every weight into `hf_model`.

    Conv feature-extractor weights go through load_conv_layer; everything else
    through load_wavaveca_layer.  Unmatched weights are logged.

    NOTE(review): `is_headless` is accepted for call-site compatibility but is
    not used in this variant — TODO confirm against the original script.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # assumes the HF wrapper exposes the backbone under `.wavaveca`
    # (obfuscated spelling of wav2vec2) — TODO confirm
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm) -> Any:
    """Load one conv feature-extractor weight/bias (or its layer norm) by
    parsing the fairseq name ``conv_layers.<layer_id>.<type_id>...``.

    NOTE(review): parameter names restored — the obfuscated source gave all
    five parameters the same name, which is a SyntaxError.
    """
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
) -> Dict:
    """Copy/paste/tweak a fairseq wav2vec2 checkpoint into the HF format.

    Depending on the flags, builds a sequence-classification, CTC, or
    pretraining model, converts the tokenizer/feature-extractor artifacts,
    loads the fairseq weights, and saves everything to
    `pytorch_dump_folder_path`.

    NOTE(review): function name and locals restored from the call site in the
    __main__ guard; the obfuscated source used duplicate parameter names
    (a SyntaxError) and lost every local binding.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path)
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=1_6_0_0_0, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='|',
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=1_6_0_0_0,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])} )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
A_ : int = parser.parse_args()
A_ : str = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
) | 57 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

# NOTE(review): the obfuscated source bound all four constants (and the logger)
# to the same name `A_`; names restored from the reference sites later in this
# module.  Grouping follows pandas.read_csv history: `encoding_errors` /
# `on_bad_lines` arrived in pandas 1.3.0, `date_format` in 2.0.
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV; mirrors the keyword arguments of pandas.read_csv.

    NOTE(review): class name restored from the reference sites
    (`getattr(CsvConfig(), ...)` and `BUILDER_CONFIG_CLASS = CsvConfig`);
    field names restored from the keys read back in `pd_read_csv_kwargs` —
    the obfuscated source declared every field as `a`, leaving the dataclass
    with a single field, and renamed `__post_init__` so it never ran.
    """

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # `delimiter`/`column_names` are datasets-style aliases for pandas'
        # `sep`/`names`; fold them in so only one spelling reaches read_csv.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            'sep': self.sep,
            'header': self.header,
            'names': self.names,
            'index_col': self.index_col,
            'usecols': self.usecols,
            'prefix': self.prefix,
            'mangle_dupe_cols': self.mangle_dupe_cols,
            'engine': self.engine,
            'converters': self.converters,
            'true_values': self.true_values,
            'false_values': self.false_values,
            'skipinitialspace': self.skipinitialspace,
            'skiprows': self.skiprows,
            'nrows': self.nrows,
            'na_values': self.na_values,
            'keep_default_na': self.keep_default_na,
            'na_filter': self.na_filter,
            'verbose': self.verbose,
            'skip_blank_lines': self.skip_blank_lines,
            'thousands': self.thousands,
            'decimal': self.decimal,
            'lineterminator': self.lineterminator,
            'quotechar': self.quotechar,
            'quoting': self.quoting,
            'escapechar': self.escapechar,
            'comment': self.comment,
            'encoding': self.encoding,
            'dialect': self.dialect,
            'error_bad_lines': self.error_bad_lines,
            'warn_bad_lines': self.warn_bad_lines,
            'skipfooter': self.skipfooter,
            'doublequote': self.doublequote,
            'memory_map': self.memory_map,
            'float_precision': self.float_precision,
            'chunksize': self.chunksize,
            'encoding_errors': self.encoding_errors,
            'on_bad_lines': self.on_bad_lines,
            'date_format': self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    """ArrowBasedBuilder that streams CSV files into Arrow tables via pandas.

    NOTE(review): method names (`_info`, `_split_generators`, `_cast_table`,
    `_generate_tables`) restored from the ArrowBasedBuilder contract and the
    in-class call `self._cast_table(...)`; the obfuscated source named every
    method `_a`, so only the last one survived.
    """

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle string, list and dict (split -> files) data_files."""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f'''Failed to read file \'{file}\' with error {type(e)}: {e}''')
                raise
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE ( snake_case):
    """Delete fairseq-only bookkeeping entries from state dict ``snake_case`` in place.

    Keys already absent are skipped (``dict.pop`` with a default), so the
    helper is safe on partially stripped checkpoints. (Restored: the original
    bound the key list to a throwaway name and then iterated the undefined
    ``ignore_keys`` / popped from the undefined ``state_dict``.)
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        snake_case.pop(k, None)
def SCREAMING_SNAKE_CASE ( snake_case):
    """Build a bias-free ``nn.Linear`` that shares weights with embedding ``snake_case``.

    (Restored: the original unpacked the shape into one duplicated name and
    passed the embedding module itself as all three ``nn.Linear`` arguments.)
    """
    vocab_size, emb_size = snake_case.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Replacing ``.data`` ties the linear layer to the embedding's weight
    # tensor, which is what defines the actual hidden->vocab projection.
    lin_layer.weight.data = snake_case.weight.data
    return lin_layer
def SCREAMING_SNAKE_CASE ( snake_case):
    """Convert a fairseq M2M-100 checkpoint at path ``snake_case`` into a
    ``MaMaaaForConditionalGeneration`` model.

    (Restored: the original passed the checkpoint *path* to
    ``remove_ignore_keys_`` and as ``strict=``, dropped the ``shared.weight``
    assignment, and never attached the ``lm_head`` it built.)
    """
    mam_aaa = torch.load(snake_case, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )
    # The decoder input embeddings double as the shared embedding table.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: fairseq checkpoints lack some HF-only parameters.
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # NOTE(review): all three helpers in this file share the name
    # ``SCREAMING_SNAKE_CASE``; at module scope it resolves to the last
    # definition, the checkpoint-conversion entry point. Also fixes the
    # literal typo ``args.fairseq_pathß``.
    model = SCREAMING_SNAKE_CASE(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _A ( unittest.TestCase ):
    """Helper that builds a small RoFormer config plus random inputs for the Flax tests.

    (Restored: the original declared all twenty ``__init__`` parameters under
    the single name ``A_`` — a SyntaxError — and gave both helper methods the
    same name ``lowercase`` even though the second calls
    ``self.prepare_config_and_inputs()``. Parameter names follow the
    attributes the body assigns; defaults are unchanged.)
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return a RoFormer config plus random input tensors for one batch."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the original passed an undefined name here;
            # restored to False per the upstream encoder-only tester -- confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage the prepared inputs as the (config, inputs_dict) pair."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class _A ( FlaxModelTesterMixin , unittest.TestCase ):
    """Common Flax model tests for the RoFormer family, driven by FlaxModelTesterMixin.

    (Restored: the original inherited from an undefined ``_UpperCAmelCase``
    although ``FlaxModelTesterMixin`` is imported above, named both methods
    ``lowercase`` so ``setUp`` never ran and the slow test was never
    discovered, and passed the undefined ``A_`` as ``from_pt``.)
    """

    # Mixin configuration -- NOTE(review): attribute names follow the
    # FlaxModelTesterMixin contract; confirm against the upstream test suite.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # NOTE(review): the tester class is defined above under the name
        # ``_A``; this reference to ``FlaxRoFormerModelTester`` was already
        # present in the original and is kept as the intended name.
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_models_from_pretrained(self):
        """Smoke-test loading every model class from the small checkpoint."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class _A ( unittest.TestCase ):
    """Slow integration test comparing RoFormer masked-LM logits against
    reference values.

    (Restored: the original bound every local to a throwaway name, so
    ``model``/``output``/``expected_shape``/``expected_slice`` were undefined
    when used, and the method name lacked the ``test_`` prefix required for
    unittest discovery.)
    """

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # Reference logits for the first three positions/classes.
        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
# Type aliases: a vector of numbers, and the scalar distance output type.
# NOTE(review): both aliases share the name ``UpperCamelCase__`` in this file,
# so the second assignment shadows the first -- TODO give them distinct names.
UpperCamelCase__ = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
UpperCamelCase__ = typing.Union[np.float64, int, float]  # noqa: UP007  (was the nonexistent ``np.floataa``)
def lowerCamelCase ( vector_1 ,vector_2 ):
    """NumPy Euclidean distance between two vectors; returns a NumPy scalar.

    (Restored: the original declared both parameters under the same name
    ``_snake_case``, which is a SyntaxError.)
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def lowerCamelCase ( vector_1 ,vector_2 ):
    """Pure-Python Euclidean distance between two equal-length vectors.

    (Restored: the original declared both parameters under one name -- a
    SyntaxError -- and unpacked each zipped pair as ``va, va``, making every
    squared difference zero.)
    """
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":

    def benchmark() -> None:
        """Time both euclidean-distance implementations on a small input."""
        from timeit import timeit

        # NOTE(review): both implementations above are defined under the same
        # name ``lowerCamelCase`` (the later, pure-Python definition shadows
        # the NumPy one), so at module scope only one of them is reachable and
        # both timings below measure that surviving definition -- TODO give
        # the two implementations distinct names. The original additionally
        # timed the undefined names ``euclidean_distance_no_np`` /
        # ``euclidean_distance`` and then called the undefined ``benchmark``.
        bench_globals = {"lowerCamelCase": lowerCamelCase}
        print("Without Numpy")
        print(
            timeit(
                "lowerCamelCase([1, 2, 3], [4, 5, 6])", number=10000, globals=bench_globals
            )
        )
        print("With Numpy")
        print(
            timeit(
                "lowerCamelCase([1, 2, 3], [4, 5, 6])", number=10000, globals=bench_globals
            )
        )

    benchmark()
| 110 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCamelCase ( _snake_case ):
    """Delete fairseq-only bookkeeping entries from state dict ``_snake_case`` in place.

    Keys already absent are skipped (``dict.pop`` with a default). (Restored:
    the original bound the key list to a throwaway name and then iterated the
    undefined ``ignore_keys`` / popped from the undefined ``state_dict``.)
    """
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        _snake_case.pop(k, None)
def lowerCamelCase ( _snake_case ):
    """Build a bias-free ``nn.Linear`` sharing weights with embedding ``_snake_case``.

    (Restored: the original unpacked the shape into one duplicated name and
    passed the embedding module itself as all three ``nn.Linear`` arguments.)
    """
    vocab_size, emb_size = _snake_case.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Replacing ``.data`` ties the layer to the embedding's weight tensor.
    lin_layer.weight.data = _snake_case.weight.data
    return lin_layer
def lowerCamelCase ( _snake_case ):
    """Convert a fairseq M2M-100 checkpoint at path ``_snake_case`` into a
    ``MaMaaaForConditionalGeneration`` model.

    (Restored: the original passed the checkpoint *path* to
    ``remove_ignore_keys_`` and ``load_state_dict``, dropped the
    ``shared.weight`` assignment, and never attached the ``lm_head``.)
    """
    mam_aaa = torch.load(_snake_case, map_location='cpu')
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function='relu',
    )
    # The decoder input embeddings double as the shared embedding table.
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: fairseq checkpoints lack some HF-only parameters.
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    # NOTE(review): all three helpers in this file share the name
    # ``lowerCamelCase``; at module scope it resolves to the last definition,
    # the checkpoint-conversion entry point. Also fixes the literal typo
    # ``args.fairseq_pathß``.
    model = lowerCamelCase(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def __UpperCAmelCase ( func : str ,a : float | Decimal ,precision : float = 10**-10 ) -> float:
    """Find a root of ``func`` -- an expression in the variable ``x`` -- via
    Newton-Raphson, starting from ``a``.

    SECURITY NOTE(review): ``eval`` executes the caller-supplied expression;
    never pass untrusted input.

    (Restored: the original declared all three parameters under the single
    name ``__a`` -- a SyntaxError -- while the ``__main__`` block below passes
    expressions such as ``"sin(x)"``, which require a local named ``x``.)
    """
    x = a
    while True:
        # One Newton step: x <- x - f(x) / f'(x). ``diff`` differentiates the
        # expression symbolically; ``eval`` evaluates it at the current ``x``.
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find Square Root of 5
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 578 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

# pandas.read_csv parameters that have no default value, are deprecated, or
# only exist in newer pandas releases. ``CsvConfig.pd_read_csv_kwargs``
# filters on these names, which is why the lists need distinct identifiers
# (the original bound all five objects to the single name ``a__``).
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]  # added in pandas 1.3
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]  # added in pandas 2.0
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
    """BuilderConfig for CSV.

    Field names mirror the ``pandas.read_csv`` keyword arguments.
    (Restored: the original declared all 41 dataclass fields under the single
    name ``UpperCAmelCase__`` -- so only the last survived -- while the
    property below reads ``self.sep``, ``self.header``, etc.; it also lost
    the ``__post_init__`` / ``pd_read_csv_kwargs`` method names and the
    property's local dict binding.)
    """

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # ``delimiter`` and ``column_names`` are user-facing aliases for
        # pandas' ``sep`` / ``names``.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """Keyword arguments forwarded to ``pandas.read_csv``, with parameters
        that are deprecated or unsupported by the installed pandas removed."""
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            # Compare against a freshly constructed config's defaults.
            # NOTE(review): the original called the undefined ``CsvConfig()``;
            # ``type(self)()`` reaches the defining class without relying on
            # the module-level name, which is shadowed later in this file.
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(type(self)(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ):
    """CSV dataset builder: reads files in chunks with pandas and yields Arrow tables.

    (Restored: the original named all four methods ``__lowercase`` -- so the
    ``_info``/``_split_generators``/``_generate_tables`` hooks the datasets
    builder API requires never existed -- and collapsed the locals in the
    split/generate logic onto throwaway names.)
    """

    # NOTE(review): at class-body execution time the module-level name
    # ``UpperCAmelCase_`` still refers to the CsvConfig dataclass defined
    # above (this builder class is not bound until its body finishes), so
    # this wires the builder to its config class (was the undefined
    # ``CsvConfig``).
    BUILDER_CONFIG_CLASS = UpperCAmelCase_

    def _info(self):
        """Dataset metadata; features come from the builder config."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Build one split per entry in ``data_files``; a bare path or list maps to ``train``.

        Raises:
            ValueError: if no data files were provided.
        """
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table) -> pa.Table:
        """Cast ``pa_table`` to the schema implied by ``self.config.features``."""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ``((file_idx, batch_idx), pa.Table)`` pairs, one per pandas chunk."""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(F"""Failed to read file '{file}' with error {type(e)}: {e}""")
                raise
| 578 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.