| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 53.2k chars) | int64 (0 – 721) | string (91 – 41.9k chars) | int64 (0 – 699) | int64 (0 – 1) |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
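# Editor's note: a minimal usage sketch, not part of the original module; it
# exercises the derived attributes of the NatConfig class defined above.
config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
assert config.hidden_size == 64 * 2 ** 3  # channel dim after the last stage
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]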
[code_codestyle: 124]
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
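# Editor's note: a minimal usage sketch, not part of the original module; it
# composes a BlipConfig from the two sub-configs defined above.
text_config = BlipTextConfig(num_hidden_layers=6)
vision_config = BlipVisionConfig(image_size=224)
config = BlipConfig.from_text_vision_configs(text_config, vision_config)
# the text encoder's cross-attention width is tied to the vision hidden size
assert config.text_config.encoder_hidden_size == config.vision_config.hidden_size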
[style_context_codestyle: 197, label: 0]
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node setups.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
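# Editor's note: a minimal usage sketch, not part of the original file. The
# Namespace fields mirror the attributes the helpers above read; values are
# illustrative.
from argparse import Namespace

args = Namespace(seed=56, n_gpu=0)
set_seed(args)  # seeds numpy and torch; also CUDA when n_gpu > 0
init_gpu_params(args)  # with n_gpu == 0 this only sets single-process defaults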
[code_codestyle: 710]
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
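# Editor's note: a minimal usage sketch, not part of the original file; it
# mirrors the docstring example above, with Lin et al. 2004 smoothing enabled.
import datasets

bleu = datasets.load_metric("bleu")
results = bleu.compute(
    predictions=[["hello", "there", "general", "kenobi"]],
    references=[[["hello", "there", "general", "kenobi"]]],
    smooth=True,
)
print(results["bleu"])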
[style_context_codestyle: 578, label: 0]
import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow

if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_tiny_pipe(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)

        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)

        slice_eff = images[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
[code_codestyle: 3]
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
[style_context_codestyle: 3, label: 1]
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
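# Editor's note: an illustrative invocation, not part of the original script.
# The script filename and both paths are placeholders.
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned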
[code_codestyle: 383]
def is_contains_unique_chars(input_str: str) -> bool:
    """
    Check if all characters in the string are unique.
    """
    # Each bit of the bitmap represents one Unicode code point
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
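# Editor's note: a minimal usage sketch, not part of the original file. The
# bitmap treats each Unicode code point as a bit index, so very large code
# points make the integer correspondingly large.
#
#   >>> is_contains_unique_chars("I_love.py")
#   True
#   >>> is_contains_unique_chars("I don't love Python")
#   False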
[style_context_codestyle: 383, label: 1]
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available

if is_datasets_available():
    import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
[code_codestyle: 615]
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
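# Editor's note: a minimal usage sketch, not part of the original module; it
# instantiates the ONNX config defined above with a default MarianConfig.
onnx_config = MarianOnnxConfig(MarianConfig(), task="seq2seq-lm")
print(list(onnx_config.inputs.keys()))
# expected: input_ids, attention_mask, decoder_input_ids, decoder_attention_mask
# (plus past_key_values entries when use_past is enabled)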
[style_context_codestyle: 179, label: 0]
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor  # the dispatching Extractor class is defined later in the original module

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str] , output_path: Union[Path, str] ):
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , 'wb' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class RarExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str] , output_path: Union[Path, str] ):
        if not config.RARFILE_AVAILABLE:
            raise ImportError('Please pip install rarfile' )
        import rarfile

        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str] , output_path: Union[Path, str] ):
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('Please pip install zstandard' )
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path , 'rb' ) as ifh, open(output_path , 'wb' ) as ofh:
            dctx.copy_stream(ifh , ofh )
class Bzip2Extractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str] , output_path: Union[Path, str] ):
        with bza.open(input_path , 'rb' ) as compressed_file:
            with open(output_path , 'wb' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class SevenZipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str] , output_path: Union[Path, str] ):
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('Please pip install py7zr' )
        import py7zr

        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , 'r' ) as archive:
            archive.extractall(output_path )
class Lz4Extractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str] , output_path: Union[Path, str] ):
        if not config.LZ4_AVAILABLE:
            raise ImportError('Please pip install lz4' )
        import lz4.frame

        with lz4.frame.open(input_path , 'rb' ) as compressed_file:
            with open(output_path , 'wb' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class Extractor:
    # Keep the zip entry last: other formats (e.g. tar or gzip) can be wrongly detected as zip.
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls ):
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )

    @staticmethod
    def _read_magic_number(path: Union[Path, str] , magic_number_length: int ):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls , path: Union[Path, str] , return_extractor: bool = False ):
        warnings.warn(
            'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
            'Use \'infer_extractor_format\' instead.' , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls , path: Union[Path, str] ):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format

    @classmethod
    def extract(cls , input_path: Union[Path, str] , output_path: Union[Path, str] , extractor_format: Optional[str] = None , extractor: Optional[BaseExtractor] = "deprecated" , ):
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix('.lock' ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str ):  # passed as positional arg
                    warnings.warn(
                        'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
                        'Use \'extractor_format\' instead.' , category=FutureWarning , )
                    extractor = extractor if extractor != 'deprecated' else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
                    'exception in 3.0.0.' , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )
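# Hedged usage sketch for the registry above (paths are illustrative):
# fmt = Extractor.infer_extractor_format("data/archive.tar.gz")  # e.g. "gzip"
# if fmt:
#     Extractor.extract("data/archive.tar.gz", "data/extracted", extractor_format=fmt)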
| 170
|
import math
import sys
def minimum_squares_to_represent_a_number( number: int ) -> int:
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
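# The DP above runs in O(n * sqrt(n)): every i in 1..n tries each root j <= sqrt(i).
# For example, minimum_squares_to_represent_a_number(12) == 3, via 12 = 4 + 4 + 4.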
if __name__ == "__main__":
import doctest
doctest.testmod()
| 170
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
    '''num_train_timesteps''': 40,
    '''sigma_min''': 0.002,
    '''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    '''num_train_timesteps''': 201,
    '''sigma_min''': 0.002,
    '''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
    '''num_train_timesteps''': 151,
    '''sigma_min''': 0.002,
    '''sigma_max''': 80.0,
}
def str2bool(v ):
    if isinstance(v, bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('''boolean value expected''' )
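# For example: str2bool("yes"), str2bool("t") and str2bool("1") return True;
# str2bool("no") and str2bool("0") return False; any other string raises
# argparse.ArgumentTypeError, which lets argparse report a clean CLI error.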
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False ):
    # Target key names follow the diffusers ResnetBlock2D naming.
    new_checkpoint[F'''{new_prefix}.norm1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[F'''{new_prefix}.conv1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[F'''{new_prefix}.norm2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[F'''{new_prefix}.conv2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[F'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[F'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[F'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None ):
    # Target key names follow the diffusers attention block naming.
    weight_q , weight_k , weight_v = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3, dim=0 )
    bias_q , bias_k , bias_v = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3, dim=0 )
    new_checkpoint[F'''{new_prefix}.group_norm.weight'''] = checkpoint[F'''{old_prefix}.norm.weight''']
    new_checkpoint[F'''{new_prefix}.group_norm.bias'''] = checkpoint[F'''{old_prefix}.norm.bias''']
    new_checkpoint[F'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F'''{new_prefix}.to_out.0.bias'''] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config ):
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''' )
    new_checkpoint = {}
    new_checkpoint['''time_embedding.linear_1.weight'''] = checkpoint['''time_embed.0.weight''']
    new_checkpoint['''time_embedding.linear_1.bias'''] = checkpoint['''time_embed.0.bias''']
    new_checkpoint['''time_embedding.linear_2.weight'''] = checkpoint['''time_embed.2.weight''']
    new_checkpoint['''time_embedding.linear_2.bias'''] = checkpoint['''time_embed.2.bias''']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['''class_embedding.weight'''] = checkpoint['''label_emb.weight''']
    new_checkpoint['''conv_in.weight'''] = checkpoint['''input_blocks.0.0.weight''']
    new_checkpoint['''conv_in.bias'''] = checkpoint['''input_blocks.0.0.bias''']
    down_block_types = unet_config['''down_block_types''']
    layers_per_block = unet_config['''layers_per_block''']
    attention_head_dim = unet_config['''attention_head_dim''']
    channels_list = unet_config['''block_out_channels''']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip )
                new_prefix = F'''down_blocks.{i}.attentions.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim )
                current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = F'''down_blocks.{i}.downsamplers.0'''
            old_prefix = F'''input_blocks.{current_layer}.0'''
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = '''mid_block.resnets.0'''
    old_prefix = '''middle_block.0'''
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix )
    new_prefix = '''mid_block.attentions.0'''
    old_prefix = '''middle_block.1'''
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim )
    new_prefix = '''mid_block.resnets.1'''
    old_prefix = '''middle_block.2'''
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix )
    current_layer = 0
    up_block_types = unet_config['''up_block_types''']
    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.1'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True )
                new_prefix = F'''up_blocks.{i}.attentions.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.2'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix )
    new_checkpoint['''conv_norm_out.weight'''] = checkpoint['''out.0.weight''']
    new_checkpoint['''conv_norm_out.bias'''] = checkpoint['''out.0.bias''']
    new_checkpoint['''conv_out.weight'''] = checkpoint['''out.2.weight''']
    new_checkpoint['''conv_out.bias'''] = checkpoint['''out.2.bias''']
    return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 83
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        image_size=32 ,
        patch_size=2 ,
        num_channels=3 ,
        embed_dim=16 ,
        depths=[1, 2, 1] ,
        num_heads=[2, 2, 4] ,
        window_size=2 ,
        mlp_ratio=2.0 ,
        qkv_bias=True ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        drop_path_rate=0.1 ,
        hidden_act="gelu" ,
        use_absolute_embeddings=False ,
        patch_norm=True ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-5 ,
        is_training=True ,
        scope=None ,
        use_labels=True ,
        type_sequence_label_size=10 ,
        encoder_stride=8 ,
        out_features=["stage1", "stage2", "stage3"] ,
        out_indices=[1, 2, 3] ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self ):
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def create_and_check_backbone(self , config , pixel_values , labels ):
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self ):
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
    def test_multi_gpu_data_parallel_forward(self ):
        pass
    def test_config(self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
        return
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip("""Swin does not use inputs_embeds""" )
    def test_inputs_embeds(self ):
        pass
@unittest.skip("""Swin does not support feedforward chunking""" )
    def test_feed_forward_chunking(self ):
        pass
    def test_model_common_attributes(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def test_attention_outputs(self ):
        pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def test_save_load(self ):
        pass
    def check_hidden_states_output(self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def test_hidden_states_output(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )

    def test_hidden_states_output_with_padding(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def test_model_from_pretrained(self ):
        pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_from_base(self ):
        pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_to_base(self ):
        pass
    def test_model_outputs_equivalence(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t ):
            # NaN != NaN, so this zeroes out only the NaN entries
            t[t != t] = 0
            return t

        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()

                def recursive_check(tuple_object , dict_object ):
                    if isinstance(tuple_object , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif isinstance(tuple_object , Dict ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has"
                                f" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}."
                            ) , )

                recursive_check(tuple_output , dict_output )

        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase , BackboneTesterMixin ):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self ):
        self.model_tester = MaskFormerSwinModelTester(self )

    def test_backbone_outputs(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 389
| 0
|
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase , ToolTesterMixin ):
    def setUp(self ):
        self.tool = load_tool("text-to-speech" )
        self.tool.setup()

    def test_exact_match_arg(self ):
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )

    def test_exact_match_kwarg(self ):
        torch.manual_seed(0 )
        result = self.tool(text="hey" )  # keyword-argument variant of the call above
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
| 162
|
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float , acceptor_conc: float , intrinsic_conc: float , ) -> float:
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
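# Worked example (illustrative values; all concentrations in the same units, e.g. cm^-3):
# with donor_conc = acceptor_conc = 1e17 and intrinsic_conc = 1e10 at T = 300 K,
# V_bi = (kT/q) * ln(Nd * Na / ni^2) = 0.02585 V * ln(1e14) ≈ 0.83 V, so
# builtin_voltage(1e17, 1e17, 1e10) should return roughly 0.833.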
if __name__ == "__main__":
import doctest
doctest.testmod()
| 162
| 1
|
from __future__ import annotations
class IIRFilter:
    def __init__(self , order ):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self , a_coeffs , b_coeffs ):
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                f'''Expected a_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(a_coeffs )}'''
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                f'''Expected b_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(b_coeffs )}'''
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self , sample ):
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
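# Minimal usage sketch (coefficients are illustrative, not from a real design):
# a trivial pass-through filter has a = [1, 0, 0] and b = [1, 0, 0], so the
# output equals the input and no history feeds back.
# filt = IIRFilter(2)
# filt.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
# assert filt.process(0.5) == 0.5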
| 84
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config ):
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser ):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )


def pytest_terminal_summary(terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )


def pytest_sessionfinish(session , exitstatus ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')
OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker ):
    def check_output(self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
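# Hedged usage note: with the custom checker registered above, an individual
# doctest can opt out of output comparison via the new flag, e.g.
#   >>> print(some_nondeterministic_value())  # doctest: +IGNORE_RESULT
# (`some_nondeterministic_value` is a placeholder, not a real helper.)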
| 84
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mvp_fast'''] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mvp'''] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
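# Rough illustration of what the _LazyModule wiring above buys (module path as
# in this file; nothing heavy is imported until an attribute is first touched):
# import transformers.models.mvp as mvp   # cheap: only the lazy shim loads
# model_cls = mvp.MvpModel                # first access triggers the torch import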
| 410
|
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self ):
        self.data = []
        self.head = 0
        self.tail = 0

    def is_empty(self ):
        return self.head == self.tail

    def push(self , data ):
        self.data.append(data )
        self.tail = self.tail + 1

    def pop(self ):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self ):
        return self.tail - self.head

    def print_queue(self ):
        print(self.data )
        print('**************' )
        print(self.data[self.head : self.tail] )
class MyNode:
    def __init__(self , data ):
        self.data = data
        self.left = None
        self.right = None
        self.height = 1

    def get_data(self ):
        return self.data

    def get_left(self ):
        return self.left

    def get_right(self ):
        return self.right

    def get_height(self ):
        return self.height

    def set_data(self , data ):
        self.data = data

    def set_left(self , node ):
        self.left = node

    def set_right(self , node ):
        self.right = node

    def set_height(self , height ):
        self.height = height
def get_height(node ) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a , b ) -> int:
    if a > b:
        return a
    return b
def right_rotation(node ) -> MyNode:
    print('left rotation node:' , node.get_data() )
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret


def left_rotation(node ) -> MyNode:
    print('right rotation node:' , node.get_data() )
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret


def lr_rotation(node ) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )


def rl_rotation(node ) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )
def insert_node(node , data ) -> MyNode | None:
    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(insert_node(node.get_left() , data ) )
        if (
            get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node )
            else:
                node = lr_rotation(node )
    else:
        node.set_right(insert_node(node.get_right() , data ) )
        if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node )
            else:
                node = left_rotation(node )
    h = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h )
    return node
def get_right_most(root ):
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root ):
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root , data ) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child )
            root.set_data(temp_data )
            root.set_right(del_node(right_child , temp_data ) )
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('No such data' )
            return root
        else:
            root.set_left(del_node(left_child , data ) )
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child , data ) )
    if get_height(right_child ) - get_height(left_child ) == 2:
        assert right_child is not None
        if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
            root = left_rotation(root )
        else:
            root = rl_rotation(root )
    elif get_height(right_child ) - get_height(left_child ) == -2:
        assert left_child is not None
        if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
            root = right_rotation(root )
        else:
            root = lr_rotation(root )
    h = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
    root.set_height(h )
    return root
class AVLtree:
    def __init__(self ):
        self.root = None

    def get_height(self ):
        return get_height(self.root )

    def insert(self , data ):
        print('insert:' + str(data ) )
        self.root = insert_node(self.root , data )

    def del_node(self , data ):
        print('delete:' + str(data ) )
        if self.root is None:
            print('Tree is empty!' )
            return
        self.root = del_node(self.root , data )

    def __str__(self ):  # a level-order traversal, gives a more intuitive look at the tree
        output = ''
        q = MyQueue()
        q.push(self.root )
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = ' ' * int(math.pow(2 , layer - 1 ) )
            output += space
            if node is None:
                output += "*"
                q.push(None )
                q.push(None )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            cnt = cnt + 1
            for i in range(100 ):
                if cnt == math.pow(2 , layer ) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 410
| 1
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self , value ):
        raise NotImplementedError()

    def end(self ):
        raise NotImplementedError()


class TextStreamer(BaseStreamer ):
    def __init__(self , tokenizer: "AutoTokenizer" , skip_prompt: bool = False , **decode_kwargs ):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self , value ):
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1" )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" " ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end(self ):
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )

    def on_finalized_text(self , text: str , stream_end: bool = False ):
        print(text , flush=True , end="" if not stream_end else None )
    def _is_chinese_char(self , cp ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
class TextIteratorStreamer(TextStreamer ):
    def __init__(self , tokenizer: "AutoTokenizer" , skip_prompt: bool = False , timeout: Optional[float] = None , **decode_kwargs ):
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self , text: str , stream_end: bool = False ):
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )

    def __iter__(self ):
        return self

    def __next__(self ):
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
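# Hedged usage sketch for TextIteratorStreamer (model/tokenizer/inputs are
# illustrative; a thread is needed because generate() blocks):
# from threading import Thread
# streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=10.0)
# Thread(target=model.generate, kwargs={**inputs, "streamer": streamer}).start()
# for chunk in streamer:      # iterates until the stop signal is queued by end()
#     print(chunk, end="")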
| 108
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts, but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 76
| 0
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Counts the tilings of a row of the given length that use at least one coloured tile."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 716
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
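
# A sketch of a concrete test case built on the mixin above (the "text-classification"
# tool name and the `load_tool` helper are assumptions for illustration, not part of
# this file):
#
#     import unittest
#     from transformers import load_tool
#
#     class TextClassificationToolTester(ToolTesterMixin, unittest.TestCase):
#         def setUp(self):
#             self.tool = load_tool("text-classification")
#             self.tool.setup()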
| 614
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_ldm3d_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
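
# A minimal end-to-end sketch of the pipeline exercised above (assumes the public
# "Intel/ldm3d" checkpoint and a CUDA device; filenames are made up for illustration):
if __name__ == "__main__":
    pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
    output = pipe("a photograph of an astronaut riding a horse", num_inference_steps=25)
    rgb_image, depth_image = output.rgb[0], output.depth[0]
    rgb_image.save("astronaut_rgb.jpg")
    depth_image.save("astronaut_depth.png")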
| 659
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(vision_config=vision_config, )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
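
# Example invocation (hypothetical script name and output path, shown only for
# illustration; requires a CUDA device and access to the checkpoint repo on the Hub):
#
#     python convert_sam_original_to_hf_format.py \
#         --model_name sam_vit_b_01ec64 \
#         --pytorch_dump_folder_path ./sam-vit-base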
| 659
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 705
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list representation of a graph, directed by default."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
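
# A small demonstration of the class above (a sketch; vertex labels are arbitrary):
if __name__ == "__main__":
    graph = GraphAdjacencyList(directed=False)
    graph.add_edge(1, 2).add_edge(2, 3).add_edge(3, 1)
    print(graph)  # {1: [2, 3], 2: [1, 3], 3: [2, 1]}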
| 207
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset([])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 487
|
def find_min(arr) -> int:
    """Partitions `arr` into two subsets and returns the minimum possible difference of their sums."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
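
# Quick sanity checks for the partition function above (values worked out by hand):
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))    # 1  -> {1, 5, 6} vs {11}
    print(find_min([36, 7, 46, 40]))  # 23 -> {7, 46} vs {36, 40}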
| 487
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 623
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Elementwise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def gelu_approximation(vector: np.array) -> np.array:
    """Sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
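
# A tiny demonstration of the two functions above (expected values follow directly
# from the formulas; sigmoid(1) = 1 / (1 + e^-1) ~= 0.731):
if __name__ == "__main__":
    print(sigmoid(np.array([0.0, 1.0])))             # ~ [0.5, 0.7310586]
    print(gelu_approximation(np.array([0.0, 1.0])))  # ~ [0.0, 0.8457957]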
| 623
| 1
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Flips two qubits with X gates, measures them, and returns the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F"Total count for various states are: {counts}")
| 227
|
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument("--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
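
# Example invocation (hypothetical paths, shown only for illustration):
#
#     python convert_bert_pytorch_checkpoint_to_original_tf.py \
#         --model_name bert-base-uncased \
#         --pytorch_model_path ./bert-base-uncased/pytorch_model.bin \
#         --tf_cache_dir ./tf-checkpoints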
| 227
| 1
|
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance in metres between two points on Earth, using the haversine formula."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
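
# A quick demonstration (a sketch; the coordinates are roughly San Francisco and
# Yosemite Valley, chosen only for illustration):
if __name__ == "__main__":
    print(f"{haversine_distance(37.774856, -122.424227, 37.864742, -119.537521):,.0f} m")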
| 717
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act="relu6", last_hidden_size=1280, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase__ = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
UpperCamelCase_ : Dict =MobileNetVaModelTester(self )
UpperCamelCase_ : Any =MobileNetVaConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Tuple =model_class(_lowerCamelCase )
UpperCamelCase_ : Dict =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Optional[int] =[*signature.parameters.keys()]
UpperCamelCase_ : List[str] =['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowerCamelCase_ ( self :List[str] ):
'''simple docstring'''
UpperCamelCase_ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowerCamelCase_ ( self :Dict ):
'''simple docstring'''
def check_hidden_states_output(_lowerCamelCase :List[Any] , _lowerCamelCase :List[Any] , _lowerCamelCase :List[Any] ):
UpperCamelCase_ : List[str] =model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : str =model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCamelCase_ : Optional[Any] =outputs.hidden_states
UpperCamelCase_ : List[str] =16
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
UpperCamelCase_ , UpperCamelCase_ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Dict =True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ : Dict =True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
UpperCamelCase_ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@slow
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : List[str] =MobileNetVaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def A_ ( ):
UpperCamelCase_ : Dict =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(_lowerCamelCase )
UpperCamelCase_ : List[Any] =self.default_image_processor
UpperCamelCase_ : List[Any] =prepare_img()
UpperCamelCase_ : List[str] =image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ : List[Any] =model(**_lowerCamelCase )
# verify the logits
UpperCamelCase_ : Optional[int] =torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCamelCase_ : Optional[Any] =torch.tensor([0.2445, -1.1993, 0.1905] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
UpperCamelCase_ : Dict =MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
UpperCamelCase_ : Optional[int] =model.to(_lowerCamelCase )
UpperCamelCase_ : Dict =MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
UpperCamelCase_ : Any =prepare_img()
UpperCamelCase_ : List[Any] =image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ : Any =model(**_lowerCamelCase )
UpperCamelCase_ : Any =outputs.logits
# verify the logits
UpperCamelCase_ : Optional[int] =torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , _lowerCamelCase )
UpperCamelCase_ : List[Any] =torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=_lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
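if __name__ == "__main__":
    # Hedged usage sketch mirroring the integration test above, written with
    # the canonical transformers class names (the `MobileNetVa*` aliases in
    # this file correspond to the public MobileNetV2 classes). The checkpoint
    # is the public Hub checkpoint already referenced above.
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV2ForImageClassification

    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
    model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1001)
    print(model.config.id2label[int(logits.argmax(-1))])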
| 395
| 0
|
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
__snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
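if __name__ == "__main__":
    # Hedged usage sketch of the API exercised by the tests above; the URL is
    # a hypothetical placeholder. DownloadManager caches by URL hash, exactly
    # as the assertions above verify.
    dl = DownloadManager(dataset_name="demo")
    local_path = dl.download_and_extract("https://example.com/archive.zip")
    print(local_path)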
| 26
|
def bfs(graph, source, sink, parents):
    """BFS over the residual graph: returns True if sink is reachable from
    source, filling `parents` with the augmenting path."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parents[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Maximum flow via the Ford-Fulkerson method (Edmonds-Karp variant,
    since augmenting paths are found with BFS)."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
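# The matrix above is the classic worked example from CLRS; its maximum flow
# is 23, so the script prints 23. Since ford_fulkerson rewrites its argument
# into the residual graph, a sanity check needs a fresh copy of the matrix.
fresh_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(fresh_graph, 0, 5) == 23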
| 631
| 0
|
"""simple docstring"""
def pancake_sort(arr):
    """Sort a list using only prefix reversals (pancake flips)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the unsorted prefix, sending the maximum to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
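# Quick non-interactive checks (inputs are illustrative):
assert pancake_sort([3, 2, 1, 5, 4]) == [1, 2, 3, 4, 5]
assert pancake_sort([-5, 0, 7]) == [-5, 0, 7]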
| 67
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=512 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=2 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
| 67
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 65
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = 'yolos'
def __init__( self , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=[512, 864] , lowerCAmelCase=16 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=100 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=1 , lowerCAmelCase=5 , lowerCAmelCase=2 , lowerCAmelCase=5 , lowerCAmelCase=2 , lowerCAmelCase=0.1 , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = num_detection_tokens
UpperCAmelCase_ = use_mid_position_embeddings
UpperCAmelCase_ = auxiliary_loss
# Hungarian matcher
UpperCAmelCase_ = class_cost
UpperCAmelCase_ = bbox_cost
UpperCAmelCase_ = giou_cost
# Loss coefficients
UpperCAmelCase_ = bbox_loss_coefficient
UpperCAmelCase_ = giou_loss_coefficient
UpperCAmelCase_ = eos_coefficient
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : int = version.parse('1.11' )
@property
def A__ ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def A__ ( self ):
return 1e-4
@property
def A__ ( self ):
return 12
| 579
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class lowerCamelCase (__lowerCAmelCase ):
'''simple docstring'''
a = ['''pixel_values''']
def __init__( self : List[Any] , _snake_case : List[str] = True , _snake_case : List[str] = None , _snake_case : Dict = PILImageResampling.BICUBIC , _snake_case : Tuple = True , _snake_case : Union[str, Any] = True , _snake_case : int = 1 / 255 , _snake_case : Dict = None , _snake_case : Union[str, Any] = True , _snake_case : str = None , _snake_case : Dict = None , **_snake_case : Any , ) -> None:
super().__init__(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = size if size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE__ = get_size_dict(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE__ = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name="crop_size" )
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = do_center_crop
SCREAMING_SNAKE_CASE__ = crop_size
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCAmelCase_ ( self : Optional[int] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : str = PILImageResampling.BILINEAR , _snake_case : Union[str, Any] = None , **_snake_case : List[str] , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ = get_size_dict(lowerCAmelCase_ )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE__ = get_resize_output_image_size(lowerCAmelCase_ , size=size["shortest_edge"] , default_to_square=lowerCAmelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE__ = (size["height"], size["width"])
else:
raise ValueError(F"""Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}""" )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase_ ( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Optional[int] = None , **_snake_case : str , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase_ ( self : Dict , _snake_case : Dict , _snake_case : Tuple , _snake_case : Optional[Any] = None , **_snake_case : Dict ) -> np.ndarray:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase_ ( self : Union[str, Any] , _snake_case : int , _snake_case : List[Any] , _snake_case : int , _snake_case : str = None , **_snake_case : Dict , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase_ ( self : str , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] = None , _snake_case : Optional[Any] = None , _snake_case : str = None , _snake_case : Any = None , _snake_case : str = None , _snake_case : int = None , _snake_case : List[str] = None , _snake_case : Optional[Any] = None , _snake_case : Union[str, Any] = None , _snake_case : List[str] = None , _snake_case : Any = None , _snake_case : List[Any] = ChannelDimension.FIRST , **_snake_case : List[str] , ) -> BatchFeature:
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ = get_size_dict(lowerCAmelCase_ , param_name="crop_size" , default_to_square=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(lowerCAmelCase_ )
if not is_batched(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = [images]
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
SCREAMING_SNAKE_CASE__ = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 717
|
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Transmitted intensity of polarized light through an analyzer at `angle` degrees."""
    # handle negative values of the initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handle angles outside the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
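# Two worked cases pin down the cos^2 behaviour: at 0 degrees the light
# passes unchanged, and at 60 degrees only cos^2(60) = 1/4 of it survives.
assert malus_law(100.0, 0) == 100.0
assert abs(malus_law(100.0, 60) - 25.0) < 1e-9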
| 538
| 0
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE: Dict = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE: List[str] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE: Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 360
|
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """True if no already-colored neighbour of the current vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case: every vertex has been assigned a color
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Backtracking m-coloring: one color per vertex, or [] if `max_colors` is not enough."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
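# Usage sketch on two small hypothetical graphs (adjacency matrices): a
# 5-vertex path is 2-colorable, while a triangle is not.
path_graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 0],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 0, 0, 1, 0],
]
triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
assert color(path_graph, 2) == [0, 1, 0, 1, 0]
assert color(triangle, 2) == []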
| 536
| 0
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Greedy knapsack: take items in descending key_func order while they fit."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """Placeholder: the upstream doctest body was elided in this dump."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
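# Usage sketch with hypothetical menu data: greedily maximize value under a
# weight budget of 60, ranking items by raw value.
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 20, 70]
menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, 60, Things.get_value)
print(chosen, total_value)  # [Things(Pizza, 100, 10), Things(Burger, 80, 40)] 180.0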
| 57
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Optimal score for the player to move on a full binary score tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # Leaf level reached: return the stored score
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
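# For the demo scores the optimal value is 65: the minimizing layer reduces
# the two halves of the tree to 33 and 65, and the root maximizer takes 65.
assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65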
| 57
| 1
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__snake_case : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase_ ( datasets.BuilderConfig ):
a_ = 1_0000
a_ = None
a_ = None
class lowercase_ ( datasets.ArrowBasedBuilder ):
a_ = ParquetConfig
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase__ , (str, list, tuple) ):
UpperCAmelCase_ = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCamelCase__ ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(UpperCamelCase__ , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = pq.ParquetFile(UpperCamelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCamelCase__ )}: {e}""" )
raise
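if __name__ == "__main__":
    # Hedged usage sketch: this builder is what backs
    # datasets.load_dataset("parquet", ...). The file path below is a
    # hypothetical placeholder.
    from datasets import load_dataset

    ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
    print(ds["train"].features)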
| 660
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
class lowercase_ ( _A ):
a_ = """linear"""
a_ = """cosine"""
a_ = """cosine_with_restarts"""
a_ = """polynomial"""
a_ = """constant"""
a_ = """constant_with_warmup"""
a_ = """piecewise_constant"""
def lowerCamelCase__ ( A_ , A_ = -1 ):
return LambdaLR(A_ , lambda A_ : 1 , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1.0 , A_ ) )
return 1.0
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = step_rules.split("," )
for rule_str in rule_list[:-1]:
UpperCAmelCase_ , UpperCAmelCase_ = rule_str.split(":" )
UpperCAmelCase_ = int(A_ )
UpperCAmelCase_ = float(A_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = float(rule_list[-1] )
def create_rules_function(A_ , A_ ):
def rule_func(A_ ) -> float:
UpperCAmelCase_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(A_ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCAmelCase_ = create_rules_function(A_ , A_ )
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=-1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 0.5 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(A_ ) * 2.0 * progress )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 1 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(A_ ) * progress) % 1.0) )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=1e-7 , A_=1.0 , A_=-1 ):
UpperCAmelCase_ = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCAmelCase_ = lr_init - lr_end
UpperCAmelCase_ = num_training_steps - num_warmup_steps
UpperCAmelCase_ = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCAmelCase_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(A_ , A_ , A_ )
__snake_case : str = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( A_ , A_ , A_ = None , A_ = None , A_ = None , A_ = 1 , A_ = 1.0 , A_ = -1 , ):
UpperCAmelCase_ = SchedulerType(A_ )
UpperCAmelCase_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(A_ , last_epoch=A_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(A_ , step_rules=A_ , last_epoch=A_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(A_ , num_warmup_steps=A_ , last_epoch=A_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , num_cycles=A_ , last_epoch=A_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , power=A_ , last_epoch=A_ , )
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , last_epoch=A_ )
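if __name__ == "__main__":
    # Hedged usage sketch, assuming torch is installed and that the final
    # dispatcher above is exported under its upstream diffusers name,
    # get_scheduler (the obfuscated alias makes the real name ambiguous here).
    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    # Linear warmup for 100 steps, then linear decay to 0 over the remaining 900.
    lr_scheduler = get_scheduler(
        "linear", optimizer, num_warmup_steps=100, num_training_steps=1000
    )
    for _ in range(1000):
        optimizer.step()
        lr_scheduler.step()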
| 660
| 1
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 719
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Shift the brightness of a PIL Image: each band value becomes c + level."""

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) simplifies to c + level
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Increase brightness by 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 55
| 0
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {"""vocab_file""": """vocab.json"""}
_a = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
_a = {"""mgp-str""": 27}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __a , __a="[GO]" , __a="[GO]" , __a="[s]" , __a="[GO]" , **__a) -> Dict:
'''simple docstring'''
super().__init__(
unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , **__a , )
with open(__a , encoding='''utf-8''') as vocab_handle:
_UpperCamelCase = json.load(__a)
_UpperCamelCase = {v: k for k, v in self.vocab.items()}
@property
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return len(self.vocab)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder)
def UpperCAmelCase ( self , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = []
for s in text:
char_tokens.extend(__a)
return char_tokens
def UpperCAmelCase ( self , __a) -> Optional[Any]:
'''simple docstring'''
return self.vocab.get(__a , self.vocab.get(self.unk_token))
def UpperCAmelCase ( self , __a) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(__a)
def UpperCAmelCase ( self , __a , __a = None) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__a):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__a))
return
_UpperCamelCase = os.path.join(
__a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
with open(__a , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__a , ensure_ascii=__a) + '''\n''')
return (vocab_file,)
| 19
|
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APPROX-VERTEX-COVER: repeatedly choose the highest-degree vertex."""
    queue: list[list] = []
    # For each node, push its (negated) degree together with the
    # (key, adjacency list) pair; heapq is a min priority queue, so
    # -1 * len(value) makes it behave like a max priority queue on degree.
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # If v has no adjacent node, skip it
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 19
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Falling product u * (u - 1) * ... * (u - p + 1) for Newton's forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the number of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 711
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
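# Sketch of the resulting CLI flow (assuming the command is registered with the
# transformers CLI entry point):
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased
# argparse routes the parsed namespace through download_command_factory, and
# run() simply pre-downloads the model and tokenizer into the cache directory.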
| 459
| 0
|
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Calculate the speed of sound in a fluid from its density and bulk modulus."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
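# This is the Newton-Laplace relation c = sqrt(K / rho): bulk modulus K in
# pascals and density rho in kg/m^3 give the speed of sound c in m/s.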
if __name__ == "__main__":
import doctest
doctest.testmod()
| 546
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features for one HANS example."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a list of examples into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 428
| 0
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """Returns the inverse of a 2x2 or 3x3 matrix, computed with Decimal precision."""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus' rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating the cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 712
|
def longest_common_substring(text1: str, text2: str) -> str:
    """Find the longest common substring of text1 and text2 with dynamic programming.

    >>> longest_common_substring("", "")
    ''
    >>> longest_common_substring("abcdef", "bcd")
    'bcd'
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    # dp[i][j] = length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 240
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 329
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 329
| 1
|
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint.
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 721
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 411
| 0
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
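    # different_colour_ways_number[n][t] counts the ways to fill a row of length n
    # using at least one tile of length t + 2; the three tile lengths 2, 3 and 4
    # play the role of the three tile colours (as in Project Euler problem 116).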
for row_length in range(length + 1 ):
for tile_length in range(2 ,5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'{solution() = }')
| 607
|
"""simple docstring"""
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 607
| 1
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 718
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results['pearsonr'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        ['p-value', 'pearsonr']\n        >>> print(round(results['pearsonr'], 2))\n        -0.74\n        >>> print(round(results['p-value'], 2))\n        0.15\n"
_CITATION = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n  Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n  Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n  Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n  Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n  Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n  Kern, Robert and Larson, Eric and Carey, C J and\n  Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n  {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n  Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n  Harris, Charles R. and Archibald, Anne M. and\n  Ribeiro, Antonio H. and Pedregosa, Fabian and\n  {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n  Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 266
| 0
|
import os
def solution(filename: str = "input.txt") -> int:
    """
    Returns the minimal path sum through the matrix in ``filename``, moving only
    up, down and to the right (Project Euler problem 82).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    columns = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(columns)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, columns):
        # First assume we arrive from the left...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # ...then relax with paths coming from above...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # ...and finally with paths coming from below.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 242
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
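# The dynamic axes above let an exported ONNX graph accept arbitrary batch sizes
# and sequence lengths (plus a choice axis for the multiple-choice task).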
| 242
| 1
|
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_ ( ) -> Any:
"""simple docstring"""
lowercase : int =(
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowercase : Union[str, Any] =bs[:]
lowercase : Tuple =0
for b in range(2**8 ):
if b not in bs:
bs.append(__A )
cs.append(2**8 + n )
n += 1
lowercase : Optional[Any] =[chr(__A ) for n in cs]
return dict(zip(__A , __A ) )
def lowercase_ ( __A : str ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[Any] =set()
lowercase : Tuple =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase : List[str] =char
return pairs
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
'''simple docstring'''
lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase : str =json.load(UpperCAmelCase )
lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
lowercase : Optional[int] =errors # how to handle errors in decoding
lowercase : Tuple =bytes_to_unicode()
lowercase : int ={v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Optional[int] ={}
lowercase : Any =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return len(self.encoder )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : List[str] =get_pairs(UpperCAmelCase )
if not pairs:
return token
while True:
lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase : Optional[int] =bigram
lowercase : Union[str, Any] =[]
lowercase : Optional[Any] =0
while i < len(UpperCAmelCase ):
try:
lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase : Optional[int] =j
if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : str =new_word
if len(UpperCAmelCase ) == 1:
break
else:
lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
lowercase : Union[str, Any] =word
return word
def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =[]
for token in re.findall(self.pat , UpperCAmelCase ):
lowercase : Optional[int] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
return bpe_tokens
def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =''''''.join(UpperCAmelCase )
lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Optional[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
lowercase : List[str] =0
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Any =token_index
writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
lowercase : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Dict =[self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
lowercase : Union[str, Any] =''' ''' + text
return (text, kwargs)
def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
lowercase : Optional[int] =super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
if needs_to_be_padded:
lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
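# A minimal, self-contained sketch of the greedy BPE merge loop implemented by
# the `bpe` method above; the merge ranks below are toy data, not a real
# vocabulary, and `toy_bpe` is a hypothetical helper name.
def toy_bpe(token: str, bpe_ranks: dict) -> str:
    word = tuple(token)
    while len(word) > 1:
        # Pick the adjacent pair with the lowest merge rank.
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda kv: bpe_ranks.get(kv, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)  # merge the ranked pair
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)
# With toy ranks, "l"+"o" merges first, then "lo"+"w".
assert toy_bpe("low", {("l", "o"): 0, ("lo", "w"): 1}) == "low"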
| 8
| 1
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
SCREAMING_SNAKE_CASE_ = {
'''ctrl''': 256,
}
SCREAMING_SNAKE_CASE_ = {
'''Pregnancy''': 16_8629,
'''Christianity''': 7675,
'''Explain''': 10_6423,
'''Fitness''': 6_3440,
'''Saving''': 6_3163,
'''Ask''': 2_7171,
'''Ass''': 9_5985,
'''Joke''': 16_3509,
'''Questions''': 4_5622,
'''Thoughts''': 4_9605,
'''Retail''': 5_2342,
'''Feminism''': 16_4338,
'''Writing''': 1_1992,
'''Atheism''': 19_2263,
'''Netflix''': 4_8616,
'''Computing''': 3_9639,
'''Opinion''': 4_3213,
'''Alone''': 4_4967,
'''Funny''': 5_8917,
'''Gaming''': 4_0358,
'''Human''': 4088,
'''India''': 1331,
'''Joker''': 7_7138,
'''Diet''': 3_6206,
'''Legal''': 1_1859,
'''Norman''': 4939,
'''Tip''': 7_2689,
'''Weight''': 5_2343,
'''Movies''': 4_6273,
'''Running''': 2_3425,
'''Science''': 2090,
'''Horror''': 3_7793,
'''Confession''': 6_0572,
'''Finance''': 1_2250,
'''Politics''': 1_6360,
'''Scary''': 19_1985,
'''Support''': 1_2654,
'''Technologies''': 3_2516,
'''Teenage''': 6_6160,
'''Event''': 3_2769,
'''Learned''': 6_7460,
'''Notion''': 18_2770,
'''Wikipedia''': 3_7583,
'''Books''': 6665,
'''Extract''': 7_6050,
'''Confessions''': 10_2701,
'''Conspiracy''': 7_5932,
'''Links''': 6_3674,
'''Narcissus''': 15_0425,
'''Relationship''': 5_4766,
'''Relationships''': 13_4796,
'''Reviews''': 4_1671,
'''News''': 4256,
'''Translation''': 2_6820,
'''multilingual''': 12_8406,
}
def lowercase__ ( lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = set()
UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase = char
UpperCAmelCase = set(_SCREAMING_SNAKE_CASE )
return pairs
class _UpperCAmelCase ( __snake_case ):
__SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Optional[int] = CONTROL_CODES
def __init__( self , lowercase_ , lowercase_ , lowercase_="<unk>" , **lowercase_ ) -> List[str]:
super().__init__(unk_token=lowercase_ , **lowercase_ )
with open(lowercase_ , encoding='utf-8' ) as vocab_handle:
UpperCAmelCase = json.load(lowercase_ )
UpperCAmelCase = {v: k for k, v in self.encoder.items()}
with open(lowercase_ , encoding='utf-8' ) as merges_handle:
UpperCAmelCase = merges_handle.read().split('\n' )[1:-1]
UpperCAmelCase = [tuple(merge.split() ) for merge in merges]
UpperCAmelCase = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
UpperCAmelCase = {}
@property
def a_ ( self ) -> str:
return len(self.encoder )
def a_ ( self ) -> List[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def a_ ( self , lowercase_ ) -> List[Any]:
if token in self.cache:
return self.cache[token]
UpperCAmelCase = tuple(lowercase_ )
UpperCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
UpperCAmelCase = get_pairs(lowercase_ )
if not pairs:
return token
while True:
UpperCAmelCase = min(lowercase_ , key=lambda lowercase_ : self.bpe_ranks.get(lowercase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase , UpperCAmelCase = bigram
UpperCAmelCase = []
UpperCAmelCase = 0
while i < len(lowercase_ ):
try:
UpperCAmelCase = word.index(lowercase_ , lowercase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase = j
if word[i] == first and i < len(lowercase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase = tuple(lowercase_ )
UpperCAmelCase = new_word
if len(lowercase_ ) == 1:
break
else:
UpperCAmelCase = get_pairs(lowercase_ )
UpperCAmelCase = '@@ '.join(lowercase_ )
UpperCAmelCase = word[:-4]
UpperCAmelCase = word
return word
def a_ ( self , lowercase_ ) -> Any:
        split_tokens = []
        words = re.findall(R'\S+\n?' , lowercase_ )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(' ' ) ) )
return split_tokens
def a_ ( self , lowercase_ ) -> Dict:
return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token ) )
def a_ ( self , lowercase_ ) -> Tuple:
return self.decoder.get(lowercase_ , self.unk_token )
def a_ ( self , lowercase_ ) -> Dict:
        out_string = ' '.join(lowercase_ ).replace('@@ ' , '' ).strip()
return out_string
def a_ ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowercase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + '\n' )
UpperCAmelCase = 0
with open(lowercase_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
UpperCAmelCase = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
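# Hedged illustration of the '@@ ' continuation marker this tokenizer emits:
# non-final subword pieces carry a trailing '@@', and detokenization joins on
# spaces and deletes the marker (the pieces below are made up, not real CTRL
# subwords).
pieces = ["Sal", "es", "force"]
marked = [p + "@@" for p in pieces[:-1]] + pieces[-1:]
assert marked == ["Sal@@", "es@@", "force"]
assert " ".join(marked).replace("@@ ", "").strip() == "Salesforce"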
| 373
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = ["image_processor", "tokenizer"]
lowerCAmelCase_ = "CLIPImageProcessor"
lowerCAmelCase_ = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ) -> Optional[int]:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__(self , text=None , images=None , return_tensors=None , **kwargs ) -> Tuple:
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def lowercase (self , *args , **kwargs ) -> Dict:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def lowercase (self , *args , **kwargs ) -> Union[str, Any]:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def lowercase (self ) -> str:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
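# Minimal sketch of the merge performed in __call__ above when both modalities
# are given: plain dicts stand in for the real BatchEncoding/BatchFeature
# objects, so this only shows the key layout, not the actual API.
text_features = {"input_ids": [[0, 8, 2]]}        # stand-in tokenizer output
image_features = {"pixel_values": [[0.1, 0.2]]}   # stand-in image features
combined = {**text_features, "pixel_values": image_features["pixel_values"]}
assert set(combined) == {"input_ids", "pixel_values"}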
| 585
| 0
|
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_lowercase = logging.getLogger()
def lowerCamelCase__ ( ):
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
return args.f
class a_ ( UpperCAmelCase__ ):
def lowercase__ ( self : Optional[Any] ):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self : int , args : List[str] ):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , 'run_glue_deebert.py' )
            with patch.object(sys , 'argv' , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
def lowercase__ ( self : List[Any] ):
__snake_case = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(__snake_case )
__snake_case = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(__snake_case )
__snake_case = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(__snake_case )
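# Self-contained sketch of the sys.argv patching trick used by run_and_check
# above: a script's main() can be driven in-process by temporarily replacing
# sys.argv (fake_main is a hypothetical stand-in for run_glue_deebert.main).
import sys
from unittest.mock import patch
def fake_main():
    return sys.argv[1:]
with patch.object(sys, "argv", ["prog", "--epochs", "3"]):
    assert fake_main() == ["--epochs", "3"]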
| 427
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( UpperCAmelCase__ , unittest.TestCase ):
lowercase_ : int = RobertaTokenizer
lowercase_ : int = RobertaTokenizerFast
lowercase_ : int = True
lowercase_ : Dict = {'''cls_token''': '''<s>'''}
def lowercase__ ( self : Union[str, Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__snake_case = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
__snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__snake_case = {'unk_token': '<unk>'}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__lowerCAmelCase ) )
def lowercase__ ( self : Tuple , **__lowerCAmelCase : List[str] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowercase__ ( self : Dict , **__lowerCAmelCase : Tuple ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : int ):
__snake_case = 'lower newer'
__snake_case = 'lower newer'
return input_text, output_text
def lowercase__ ( self : Union[str, Any] ):
__snake_case = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case = 'lower newer'
__snake_case = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
__snake_case = tokenizer.tokenize(__lowerCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def lowercase__ ( self : Tuple ):
__snake_case = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def lowercase__ ( self : int ):
__snake_case = self.tokenizer_class.from_pretrained('roberta-base' )
__snake_case = tokenizer.encode('sequence builders' , add_special_tokens=__lowerCAmelCase )
__snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=__lowerCAmelCase )
__snake_case = tokenizer.encode(
'sequence builders' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
__snake_case = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
__snake_case = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
__snake_case = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowercase__ ( self : int ):
__snake_case = self.get_tokenizer()
__snake_case = 'Encode this sequence.'
__snake_case = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
__snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
__snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
__snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing spaces after special tokens
__snake_case = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )} ) # mask token has a left space
__snake_case = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
__snake_case = 'Encode <mask> sequence'
__snake_case = 'Encode <mask>sequence'
__snake_case = tokenizer.encode(__lowerCAmelCase )
__snake_case = encoded.index(__lowerCAmelCase )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
__snake_case = tokenizer.encode(__lowerCAmelCase )
__snake_case = encoded.index(__lowerCAmelCase )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowercase__ ( self : List[str] ):
pass
def lowercase__ ( self : Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__snake_case = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__snake_case = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__snake_case = 'A, <mask> AllenNLP sentence.'
__snake_case = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
__snake_case = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
__snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
__snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
__lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def lowercase__ ( self : Optional[int] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__snake_case = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__snake_case = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , __lowerCAmelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , __lowerCAmelCase )
self.assertEqual(post_processor_state['trim_offsets'] , __lowerCAmelCase )
def lowercase__ ( self : Optional[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__snake_case = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
__snake_case = F'{text_of_1_token} {text_of_1_token}'
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ) + 1, 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
__snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
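# Hedged sketch of what the offset-mapping assertions above check: each
# token's (start, end) span indexes into the original string, and
# `trim_offsets` decides whether a leading space belongs to the span.
# The spans below are written out by hand for a toy two-word string.
text = "hello hello"
first, second = (0, 5), (6, 11)   # trim_offsets=True style spans
assert text[first[0] : first[1]] == "hello"
assert text[second[0] : second[1]] == "hello"
untrimmed_second = (5, 11)        # trim_offsets=False keeps the space
assert text[untrimmed_second[0] : untrimmed_second[1]] == " hello"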
| 427
| 1
|
"""simple docstring"""
def sylvester( number: int ) -> int:
    assert isinstance(number , int ), f'The input value of [n={number}] is not an integer'
    if number == 1:
        return 2
    elif number < 1:
        msg = f'The input value of [n={number}] has to be > 0'
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
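# A non-recursive sketch of the same recurrence: since lower * upper + 1 above
# equals (num - 1) * num + 1, the sequence satisfies a(n) = a(n-1)^2 - a(n-1) + 1
# with a(1) = 2 (sylvester_iterative is a hypothetical helper name).
def sylvester_iterative(n: int) -> int:
    num = 2
    for _ in range(n - 1):
        num = num * num - num + 1
    return num
assert [sylvester_iterative(k) for k in range(1, 5)] == [2, 3, 7, 43]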
| 450
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class a_ ( unittest.TestCase ):
def UpperCamelCase_ ( self ):
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=__UpperCamelCase , )
assert hasattr(self , """env""" )
def UpperCamelCase_ ( self , __UpperCamelCase ):
# configuration for running training on smdistributed Model Parallel
_lowercase = {
"""enabled""": True,
"""processes_per_host""": 8,
}
_lowercase = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
_lowercase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
_lowercase = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCamelCase , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=__UpperCamelCase , py_version="""py36""" , )
def UpperCamelCase_ ( self , __UpperCamelCase ):
TrainingJobAnalytics(__UpperCamelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def UpperCamelCase_ ( self , __UpperCamelCase ):
# create estimator
_lowercase = self.create_estimator(__UpperCamelCase )
# run training
estimator.fit()
# result dataframe
_lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __UpperCamelCase )
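# Hedged sketch of the KPI extraction above: filter a metrics dataframe by
# metric name and collect the value column (toy data; pandas is assumed to be
# available, as it is for TrainingJobAnalytics dataframes).
import pandas as pd
df = pd.DataFrame(
    {"metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"],
     "value": [0.31, 1.1, 0.35]}
)
eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])
assert eval_accuracy == [0.31, 0.35]
assert all(v >= 0.3 for v in eval_accuracy)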
| 287
| 0
|
'''simple docstring'''
import os
import sys
import unittest
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE__ : int = os.path.join(git_repo_path, """src""", """diffusers""")
class __lowerCAmelCase( unittest.TestCase ):
def _lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :int = find_backend(' if not is_torch_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE , 'torch' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
SCREAMING_SNAKE_CASE_ :str = find_backend(' if not (is_torch_available() and is_transformers_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE , 'torch_and_transformers' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
SCREAMING_SNAKE_CASE_ :Optional[int] = find_backend(
' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE , 'torch_and_transformers_and_onnx' )
def _lowercase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[int] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , SCREAMING_SNAKE_CASE )
self.assertIn('torch_and_transformers' , SCREAMING_SNAKE_CASE )
self.assertIn('flax_and_transformers' , SCREAMING_SNAKE_CASE )
self.assertIn('torch_and_transformers_and_onnx' , SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'] )
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] )
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] )
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] )
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] )
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] )
def _lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[str] = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE , '\nCONSTANT = None\n' )
SCREAMING_SNAKE_CASE_ :Tuple = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
SCREAMING_SNAKE_CASE , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
SCREAMING_SNAKE_CASE_ :List[str] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
SCREAMING_SNAKE_CASE_ :List[str] = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[Any] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
SCREAMING_SNAKE_CASE_ :Optional[int] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , SCREAMING_SNAKE_CASE )
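# Hedged re-implementation of the `find_backend` behaviour exercised above:
# pull the backend names out of an `if not is_xxx_available():` guard line
# (toy_find_backend is a simplified stand-in, not the real utility).
import re
from typing import Optional
def toy_find_backend(line: str) -> Optional[str]:
    names = re.findall(r"is_(\w+)_available\(\)", line)
    return "_and_".join(names) if names else None
assert toy_find_backend(" if not is_torch_available():") == "torch"
assert (
    toy_find_backend(" if not (is_torch_available() and is_transformers_available()):")
    == "torch_and_transformers"
)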
| 233
|
'''simple docstring'''
from __future__ import annotations
def longest_subsequence( array: list[int] ) -> list[int]: # This function is recursive
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
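# Worked example for the recursive search above; per the upstream doctest this
# input admits a length-6 answer such as [10, 22, 33, 41, 60, 80]. We only
# assert the weaker property that the result is non-decreasing.
example = [10, 22, 9, 33, 21, 50, 41, 60, 80]
result = longest_subsequence(example)
assert all(a <= b for a, b in zip(result, result[1:]))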
| 233
| 1
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = "MCTCTFeatureExtractor"
lowerCAmelCase : int = "AutoTokenizer"
def __init__( self : Tuple ,_snake_case : int ,_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
super().__init__(_snake_case ,_snake_case )
lowercase__ : str = self.feature_extractor
lowercase__ : Tuple = False
def __call__( self : Optional[Any] ,*_snake_case : int ,**_snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_snake_case ,**_snake_case )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
lowercase__ : Union[str, Any] = kwargs.pop('''raw_speech''' )
else:
lowercase__ : str = kwargs.pop('''audio''' ,_snake_case )
lowercase__ : Union[str, Any] = kwargs.pop('''sampling_rate''' ,_snake_case )
lowercase__ : Optional[int] = kwargs.pop('''text''' ,_snake_case )
if len(_snake_case ) > 0:
lowercase__ : Dict = args[0]
lowercase__ : Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
lowercase__ : int = self.feature_extractor(_snake_case ,*_snake_case ,sampling_rate=_snake_case ,**_snake_case )
if text is not None:
lowercase__ : Tuple = self.tokenizer(_snake_case ,**_snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase__ : Dict = encodings['''input_ids''']
return inputs
def UpperCAmelCase ( self : Tuple ,*_snake_case : Any ,**_snake_case : str ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[Any] ,*_snake_case : List[str] ,**_snake_case : Optional[Any] ) -> int:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*_snake_case ,**_snake_case )
lowercase__ : Dict = kwargs.pop('''input_features''' ,_snake_case )
lowercase__ : Any = kwargs.pop('''labels''' ,_snake_case )
if len(_snake_case ) > 0:
lowercase__ : Tuple = args[0]
lowercase__ : str = args[1:]
if input_features is not None:
lowercase__ : List[str] = self.feature_extractor.pad(_snake_case ,*_snake_case ,**_snake_case )
if labels is not None:
lowercase__ : Union[str, Any] = self.tokenizer.pad(_snake_case ,**_snake_case )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowercase__ : Optional[Any] = labels['''input_ids''']
return input_features
def UpperCAmelCase ( self : List[Any] ,*_snake_case : Optional[int] ,**_snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@contextmanager
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
lowercase__ : List[str] = True
lowercase__ : Any = self.tokenizer
yield
lowercase__ : Optional[int] = self.feature_extractor
lowercase__ : Any = False
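# Minimal sketch of the deprecated `as_target_processor` pattern above: a
# context manager that temporarily swaps which sub-processor handles calls
# (ToyProcessor is a made-up class, reduced to the swapping logic only).
from contextlib import contextmanager
class ToyProcessor:
    def __init__(self):
        self.current = "feature_extractor"
    @contextmanager
    def as_target_processor(self):
        self.current = "tokenizer"
        try:
            yield
        finally:
            self.current = "feature_extractor"
p = ToyProcessor()
with p.as_target_processor():
    assert p.current == "tokenizer"
assert p.current == "feature_extractor"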
| 560
|
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
lowercase__ : List[Any] = tmp_path / '''cache'''
lowercase__ : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : List[str] = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Dict = tmp_path / '''cache'''
lowercase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowercase__ : int = features.copy() if features else default_expected_features
lowercase__ : Dict = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase__ : Dict = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_sql_dataset(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
    with contextlib.closing(sqlite3.connect(__lowerCamelCase ) ) as con:
lowercase__ : Dict = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
lowercase__ : Optional[Any] = tmp_path / '''cache'''
lowercase__ : Tuple = os.path.join(__lowerCamelCase , '''tmp.sql''' )
lowercase__ : Union[str, Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
lowercase__ : Any = iter_sql_file(__lowerCamelCase )
lowercase__ : Any = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
lowercase__ : Any = tmp_path / '''cache'''
lowercase__ : int = os.path.join(__lowerCamelCase , '''tmp.sql''' )
lowercase__ : Optional[int] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCamelCase ).read()
SqlDatasetWriter(__lowerCamelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
lowercase__ : Optional[Any] = iter_sql_file(__lowerCamelCase )
lowercase__ : List[str] = iter_sql_file(__lowerCamelCase )
for rowa, rowa in zip(__lowerCamelCase , __lowerCamelCase ):
assert rowa == rowa
@require_sqlalchemy
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
lowercase__ : List[Any] = tmp_path / '''cache'''
lowercase__ : Optional[int] = os.path.join(__lowerCamelCase , '''tmp.sql''' )
lowercase__ : Dict = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCamelCase ).read()
with pytest.raises(__lowerCamelCase ):
SqlDatasetWriter(__lowerCamelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
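# Self-contained sketch of the round trip the tests above perform, using only
# the standard library: write rows into SQLite, read them back, and compare.
import sqlite3
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
rows = [("a", 1, 1.0), ("b", 2, 2.0)]
conn.executemany("INSERT INTO dataset VALUES (?, ?, ?)", rows)
assert list(conn.execute("SELECT * FROM dataset")) == rows
conn.close()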
| 560
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : List[Any] = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
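# Hedged sketch of the lazy-import pattern above: attribute access on the
# module object is deferred until first use, so heavy backends only load on
# demand (ToyLazyModule is a simplified stand-in for _LazyModule).
import importlib
import types
class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)
m = ToyLazyModule("toy", {"json": ["dumps"]})
assert m.dumps({"a": 1}) == '{"a": 1}'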
| 216
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : Optional[int] = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[str] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : int = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
_UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 216
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__A = logging.get_logger(__name__)
@dataclass
class snake_case ( lowercase__ ):
SCREAMING_SNAKE_CASE_ : Any = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : Tuple , **UpperCamelCase__ : int)-> Any:
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__lowerCAmelCase: Optional[int] = deprecated_arg[3:]
setattr(self , UpperCAmelCase__ , not kwargs.pop(UpperCAmelCase__))
logger.warning(
f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
f" {positive_arg}={kwargs[positive_arg]}")
__lowerCAmelCase: int = kwargs.pop("torchscript" , self.torchscript)
__lowerCAmelCase: List[str] = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics)
__lowerCAmelCase: int = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level)
super().__init__(**UpperCAmelCase__)
SCREAMING_SNAKE_CASE_ : List[str] = field(default=lowercase__, metadata={"""help""": """Trace the models using torchscript"""} )
SCREAMING_SNAKE_CASE_ : str = field(default=lowercase__, metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
SCREAMING_SNAKE_CASE_ : Any = field(
default="""O1""", metadata={
"""help""": (
"""For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. """
"""See details at https://nvidia.github.io/apex/amp.html"""
)
}, )
@cached_property
def lowercase_ ( self : List[Any])-> int:
'''simple docstring'''
requires_backends(self , ["torch"])
logger.info("PyTorch: setting up devices")
if not self.cuda:
__lowerCAmelCase: List[Any] = torch.device("cpu")
__lowerCAmelCase: int = 0
elif is_torch_tpu_available():
__lowerCAmelCase: Optional[Any] = xm.xla_device()
__lowerCAmelCase: int = 0
else:
__lowerCAmelCase: Optional[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu")
__lowerCAmelCase: str = torch.cuda.device_count()
return device, n_gpu
@property
def lowercase_ ( self : Tuple)-> Any:
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def lowercase_ ( self : int)-> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def lowercase_ ( self : int)-> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
return self._setup_devices[0]
@property
def lowercase_ ( self : Union[str, Any])-> List[str]:
'''simple docstring'''
requires_backends(self , ["torch"])
return self._setup_devices[1]
@property
def lowercase_ ( self : Any)-> Dict:
'''simple docstring'''
return self.n_gpu > 0
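# Sketch of the deprecated-flag translation in __init__ above: a legacy
# `no_xxx=True` kwarg becomes `xxx=False` on the new config. This toy
# function mirrors the `deprecated_arg[3:]` slicing, nothing more.
def translate_deprecated(kwargs: dict) -> dict:
    out = {}
    for key, value in kwargs.items():
        if key.startswith("no_"):
            out[key[3:]] = not value
        else:
            out[key] = value
    return out
assert translate_deprecated({"no_cuda": True, "fp16": True}) == {"cuda": False, "fp16": True}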
| 346
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase_ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _lowerCAmelCase ( __magic_name__ : int ) -> Tuple:
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def _lowerCAmelCase ( __magic_name__ : int ) -> Any:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Any ) -> Any:
from transformers.testing_utils import pytest_terminal_summary_main
lowercase : Optional[Any] =terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__magic_name__ , id=__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] ) -> List[str]:
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
lowercase : Optional[int] =0
# Doctest custom flag to ignore output.
UpperCamelCase_ = doctest.register_optionflag("""IGNORE_RESULT""")
UpperCamelCase_ = doctest.OutputChecker
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_ = CustomOutputChecker
UpperCamelCase_ = HfDoctestModule
UpperCamelCase_ = HfDocTestParser
| 92
| 0
|
def greatest_common_divisor( a: int , b: int ) -> int:
    '''simple docstring'''
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )
def gcd_by_iterative( x: int , y: int ) -> int:
    '''simple docstring'''
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
def main() -> None:
    '''simple docstring'''
    try:
        nums = input('Enter two integers separated by comma (,): ' ).split(',' )
        num_a = int(nums[0] )
        num_b = int(nums[1] )
        print(
            F'''greatest_common_divisor({num_a}, {num_b}) = '''
            F'''{greatest_common_divisor(num_a , num_b )}''' )
        print(F'''By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a , num_b )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input' )
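# Quick checks for the two GCD variants above:
assert greatest_common_divisor(121, 11) == 11
assert gcd_by_iterative(121, 11) == 11
assert greatest_common_divisor(0, 7) == 7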
if __name__ == "__main__":
main()
| 559
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a metaseq checkpoint and normalize its state dict for OPTModel."""
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']

    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has its QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
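
def _qkv_split_example():
    # Illustration only (added; never invoked by this script): a fused projection
    # of depth 3*d splits into three equal chunks along dim 0, and metaseq stores
    # them in K, V, Q order despite the ".qkv_proj." name.
    fused = torch.randn(3 * 4, 4)
    k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert k.shape == v.shape == q.shape == (4, 4)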
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq OPT checkpoint to the Hugging Face format."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()

    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()

results = {}
# fmt: off
lowercase__ = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowercase__ = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowercase__ = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowercase__ = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowercase__ = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowercase__ = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowercase__ = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowercase__ = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowercase__ = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowercase__ = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowercase__ = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowercase__ = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowercase__ = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowercase__ = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowercase__ = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on a sample image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be valid utf-8.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # Perceiver can only accept one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date, e.g.:

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # a year is not a leap year if it is not divisible by 4,
    # or if it is a century year not divisible by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
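    # Cross-check sketch (added for illustration, not in the original module):
    # compare against the standard library for a sample date.
    from datetime import date

    assert get_week_day(2000, 1, 1) == date(2000, 1, 1).strftime("%A")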
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            # prepend a fake source connected to every real source
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            # append a fake sink reachable from every real sink
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
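    # Sanity check (added for illustration, not in the original): the only
    # source-to-sink route in the sample graph is 0 -> 1 -> 2 -> 3,
    # bottlenecked by the 1 -> 2 capacity of 6.
    assert maximum_flow == 6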
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find all instances where a non-binary file is opened without UTF-8 encoding"""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        r"""Find all print statements in dataset scripts"""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        filtered_matches = [match for match in matches if match is not None and match.group(1) is not None]
        return filtered_matches[0] if filtered_matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
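
if __name__ == "__main__":
    # Illustrative check (added; not part of the test suite): the encoding regex
    # flags an `open(...)` call that specifies neither a mode nor an encoding.
    _pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
    assert _pattern.search(" open('data.txt')") is not None
    assert _pattern.search(" open('data.txt', encoding='utf-8')") is None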
'''simple docstring'''
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
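
if __name__ == "__main__":
    # A minimal usage sketch (added for illustration; the subclass and its
    # DEFAULTS are hypothetical, not part of the original module).
    class _DemoNamer(TrialShortNamer):
        PREFIX = "hp"
        DEFAULTS = {"learning_rate": 0.1, "batch_size": 8}

    # Only non-default values appear in the run name, under auto-generated short keys.
    assert _DemoNamer.shortname({"learning_rate": 0.3, "batch_size": 8}) == "hp_lr0.3"
    # parse_repr inverts shortname, filling in defaults for omitted parameters.
    assert _DemoNamer.parse_repr("hp_lr0.3") == {"learning_rate": 0.3, "batch_size": 8}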
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,  # entries 2 - 104 are only used for pretraining
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by adding eos to the end. No bos token is added to the front."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
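
def _eos_append_sketch(token_ids_0, token_ids_1=None, eos_token_id=1):
    # Illustration only (added; not used by the tokenizer above, and the
    # eos_token_id of 1 is hypothetical): mirrors build_inputs_with_special_tokens,
    # which appends a single EOS and never adds a BOS.
    if token_ids_1 is None:
        return token_ids_0 + [eos_token_id]
    return token_ids_0 + token_ids_1 + [eos_token_id]


# e.g. _eos_append_sketch([5, 6], [7]) == [5, 6, 7, 1]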
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_2, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=2_00, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
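
# Usage sketch (not part of the original script; the paths below are illustrative assumptions):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin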
| 531
| 1
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """
    Constructs an ESM tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
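
# Minimal usage sketch for the tokenizer above (the vocab path and the protein sequence are
# illustrative assumptions, not taken from this file):
#   tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#   ids = tokenizer("M K T A Y I A K Q R")["input_ids"]  # whitespace-separated residues map to vocab ids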
| 7
|
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solves the system of linear equations
        a1*x + b1*y = c1  and  a2*x + b2*y = c2
    (each equation given as [a, b, c]) using Cramer's rule.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
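
# Worked example (a sanity check added for illustration, not from the original module):
# for 2x + 3y = 6 and 4x + 9y = 12, determinant = 2*9 - 4*3 = 6,
# determinant_x = 6*9 - 12*3 = 18 and determinant_y = 2*12 - 4*6 = 0,
# so Cramer's rule gives x = 18/6 = 3.0 and y = 0/6 = 0.0.
if __name__ == "__main__":
    assert cramers_rule_2x2([2, 3, 6], [4, 9, 12]) == (3.0, 0.0)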
| 461
| 0
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 570
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
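
# Example of the ids built above (lengths are illustrative): with len(token_ids_0) == 3 and
# len(token_ids_1) == 2, create_token_type_ids_from_sequences returns
# [0, 0, 0, 0, 0, 1, 1, 1]: five 0s for [CLS] + token_ids_0 + [SEP], three 1s for token_ids_1 + [SEP].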
| 570
| 1
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only"""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
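
    # Usage sketch for the adapter wrapper above (shapes are illustrative assumptions):
    #   base = nn.Linear(16, 16)
    #   wrapped = LoRALayer(base, rank=4)
    #   out = wrapped(torch.randn(2, 16))  # base output plus the low-rank adapter output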
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        config = BitsAndBytesConfig()
        config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
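
# Minimal 4-bit loading sketch mirroring the tests above (the model id and device map are
# illustrative, and a CUDA GPU plus the bitsandbytes package are assumed):
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_4bit=True, device_map="auto")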
| 228
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
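
# Usage sketch (the script and file names below are illustrative, not taken from this file):
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json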
| 228
| 1
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
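
# Usage sketch (an assumption for illustration): constructing the deprecated class emits the
# warning above and otherwise behaves exactly like DeformableDetrImageProcessor, since all
# arguments are forwarded unchanged.
#   feature_extractor = DeformableDetrFeatureExtractor()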
| 721
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break

                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)

                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
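
# Usage sketch: this builder is what backs `load_dataset("json", ...)`. The file path below is
# an illustrative assumption:
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl")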
| 64
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
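
# Worked example of the sparse-step arithmetic above (a sanity sketch using the defaults):
# with num_layers=12 and num_sparse_encoder_layers=3, every 12 // 3 == 4th encoder layer is a
# sparse MoE layer.
if __name__ == "__main__":
    config = SwitchTransformersConfig()
    assert config.encoder_sparse_step == 4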
| 617
|
def topological_sort(graph):
    """Performs Kahn's algorithm (BFS-based topological sort) on a DAG given as an adjacency list."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
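# For the sample graph above, Kahn's algorithm prints [0, 1, 2, 3, 4, 5]
# (one valid topological order; ties are broken by FIFO queue order).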
| 84
| 0
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registered(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 704
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Any = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCamelCase : Tuple = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # the pretraining head also predicts next-sentence relationships, which need integer labels
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # TF MobileBert is only published as a single checkpoint
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 545
| 0
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}

def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))

def solution() -> int:
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number))

if __name__ == "__main__":
    print(solution())
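# Worked example (illustrative): 4150 is one of the numbers solution() counts,
# since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150, i.e. the number
# equals the sum of the fifth powers of its own digits.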
| 323
|
ROMAN = [
(1_000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one encodes a subtractive pair (e.g. "IV" = 4)
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total

def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
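    # Quick sanity check (illustrative): "MCMXCIV" decodes as
    # 1000 + (1000 - 100) + (100 - 10) + (5 - 1) = 1994, and encoding 1994 walks
    # the ROMAN table back to "M" + "CM" + "XC" + "IV".
    assert roman_to_int("MCMXCIV") == 1994
    assert int_to_roman(1994) == "MCMXCIV"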
| 323
| 1
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good predictions are given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    # each candidate program is checked against its task's test case
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
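# Worked example (illustrative, matching the docstring example above): with n = 2
# samples for a task of which c = 1 passes, the unbiased estimator
#     pass@k = 1 - C(n - c, k) / C(n, k)
# gives pass@1 = 1 - (1 - 1/2) = 0.5 and pass@2 = 1.0 (n - c < k short-circuits):
#
#   estimate_pass_at_k(np.array([2]), np.array([1]), 1)  # -> array([0.5])
#   estimate_pass_at_k(np.array([2]), np.array([1]), 2)  # -> array([1.])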
| 719
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow's console noise
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
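# Typical invocation (illustrative), e.g. from a repository checkout:
#   python utils/print_env.py
# Each line prints "<component>: <version or None>", which keeps the output easy
# to paste into bug reports.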
| 367
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]

if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
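# Usage sketch (illustrative): with the _LazyModule indirection above,
# `from transformers.models.layoutlmv3 import LayoutLMv3Config` resolves the name
# through _import_structure and imports configuration_layoutlmv3 only at attribute
# access time; the heavy torch/TF submodules stay unimported until first use.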
| 265
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 265
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
A_ = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None) -> List["torch.Tensor"]:
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
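# Minimal usage sketch (illustrative; class name and default sizes as defined above):
#
#   from PIL import Image
#   processor = MobileViTImageProcessor()
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 256, 256) after resize, crop,
#                                #    rescale and the RGB -> BGR channel flip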
| 714
|
'''simple docstring'''
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]

def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort

if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
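# Expected behaviour (illustrative): each vertex is appended only after all of its
# successors, so the run above prints a reverse topological order of the graph,
# namely ['c', 'd', 'e', 'b', 'a'].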
| 384
| 0
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()

def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    # Try to match the regexes in qs against any window of ks
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
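# Usage sketch (illustrative; assumes a Flax GPT-style parameter tree whose key
# paths match the rules above):
#
#   from flax.core.frozen_dict import unfreeze
#   param_spec = set_partitions(unfreeze(model.params))
#   # every leaf of param_spec is now a PartitionSpec such as P("mp", None) or
#   # None, ready to be handed to pjit for model-parallel sharding.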
| 581
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
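# Usage sketch (illustrative): composing a BLIP-2 config from its parts and
# checking the derived flags set in __init__ above:
#
#   config = Blip2Config(text_config={"model_type": "opt"})
#   assert config.use_decoder_only_language_model  # OPT is a causal LM
#   clone = Blip2Config.from_vision_qformer_text_configs(
#       config.vision_config, config.qformer_config, config.text_config
#   )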
| 581
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowercase =logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    '''Split the fused qkv projections of the Swin backbone into separate query/key/value entries.'''
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
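# Illustrative helper (not part of the original conversion script): shows, on dummy
# data, how a fused (3 * dim, dim) qkv projection is sliced row-wise into equally
# sized query/key/value blocks, mirroring the slicing performed above.
def _demo_split_fused_qkv():
    import torch

    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query = fused[:dim, :]
    key = fused[dim : dim * 2, :]
    value = fused[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)
    return query, key, value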
def read_in_decoder_q_k_v(state_dict, config):
    '''Split the fused in_proj matrices of decoder self-attention into q/k/v projections.'''
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    '''Download the standard COCO cats image used to sanity-check the conversion.'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    '''Convert an original DETA checkpoint into the HuggingFace format and optionally push it to the hub.'''
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer.decoder', 'model.decoder')] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict['model.' + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer', 'model')] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='coco_detection')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
    # verify logits
    print('Logits:', outputs.logits[0, :3, :3])
    print('Boxes:', outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print('Everything ok!')
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
lowercase =argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowercase =parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
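# Example invocation (file and folder names are illustrative, not from the original script):
#
#     python convert_deta_checkpoint.py \
#         --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./converted-deta \
#         --push_to_hub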
| 702
|
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, feature_extractor, tokenizer):
        '''Wrap an Encodec feature extractor and a T5 tokenizer behind a single processor.'''
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        '''Forward decoder prompt id retrieval to the underlying tokenizer.'''
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        '''Tokenize `text` and/or featurize `audio`, merging both outputs into one encoding.'''
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs['input_values'] = audio_inputs['input_values']
            if "padding_mask" in audio_inputs:
                inputs['padding_mask'] = audio_inputs['padding_mask']
            return inputs
    def batch_decode(self, *args, **kwargs):
        '''Decode token ids with the tokenizer, or post-process generated audio when `audio` is passed.'''
        audio_values = kwargs.pop('audio', None)
        padding_mask = kwargs.pop('padding_mask', None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        '''Forward `decode` to the underlying tokenizer.'''
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        '''Strip padding from each generated audio array using the padding mask.'''
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), 'constant', constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
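# A short usage sketch (checkpoint name and `generated_values` are illustrative):
#
#     processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#     inputs = processor(text=["80s pop track with heavy drums"], padding=True, return_tensors="pt")
#     # after model.generate(...): strip padding from the produced waveforms
#     audios = processor.batch_decode(audio=generated_values, padding_mask=inputs["padding_mask"])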
| 331
| 0
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    '''Polynomial warmup wrapper around another learning-rate schedule.'''
    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or 'WarmUp') as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name)
    def get_config(self):
        '''Serialize the schedule so it can be restored via `from_config`.'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    '''Create an optimizer with a warmup phase followed by polynomial learning-rate decay.'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
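# A short usage sketch (hyperparameter values are illustrative): build the optimizer
# and schedule pair for a run of 10_000 steps with 1_000 warmup steps and weight decay.
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5,
#         num_train_steps=10_000,
#         num_warmup_steps=1_000,
#         weight_decay_rate=0.01,
#     )
#     model.compile(optimizer=optimizer, loss=loss_fn)  # `model` / `loss_fn` assumed to exist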
class AdamWeightDecay(Adam):
    '''Adam with decoupled weight decay applied only to whitelisted variables.'''
    def __init__(self, learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7, amsgrad: bool = False, weight_decay_rate: float = 0.0, include_in_weight_decay: Optional[List[str]] = None, exclude_from_weight_decay: Optional[List[str]] = None, name: str = "AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        '''Creates an optimizer from its config with the WarmUp custom object registered.'''
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
            self.weight_decay_rate, name='adam_weight_decay_rate')
    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'], use_locking=self._use_locking)
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        '''Retrieves the learning rate with the given state.'''
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        '''Whether to use L2 weight decay for `param_name`.'''
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    '''Accumulates gradients over multiple mini-batches so they can be applied at once.'''
    def __init__(self):
        self._gradients = []
        self._accum_steps = None
    @property
    def step(self):
        '''Number of accumulated steps.'''
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
        return self._accum_steps.value()
    @property
    def gradients(self):
        '''The accumulated gradients on the current replica.'''
        if not self._gradients:
            raise ValueError('The accumulator should be called first to initialize the gradients')
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        '''Accumulates `gradients` on the current replica.'''
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(f"""Expected {len(self._gradients)} gradients, but got {len(gradients)}""")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)
    def reset(self):
        '''Resets the accumulated gradients on the current replica.'''
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
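# A minimal usage sketch (training-loop names are illustrative): accumulate gradients
# over several micro-batches, then apply them once and reset the accumulator.
#
#     accumulator = GradientAccumulator()
#     for micro_batch in micro_batches:
#         with tf.GradientTape() as tape:
#             loss = compute_loss(model, micro_batch)  # assumed helper
#         accumulator(tape.gradient(loss, model.trainable_variables))
#     optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#     accumulator.reset()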
| 70
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [4, 8, 16, 32],
            'num_groups': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized')
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384', device_map='auto')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], 'tabby, tabby cat')
| 394
| 0
|
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check the leading digits against the prefixes used by the major card networks."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    """Validate the number with the Luhn checksum algorithm."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
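# Worked example (illustrative): for "4111111111111111" the Luhn pass doubles every
# second digit from the right (4,1,1,1,1,1,1,1 -> 8,2,2,2,2,2,2,2, summing to 22),
# adds the remaining eight 1s (8), and 22 + 8 = 30 is divisible by 10, so the check passes.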
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Run all checks (characters, length, prefix, Luhn) and report the result."""
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f'{error_message} of its length.')
        return False
    if not validate_initial_digits(credit_card_number):
        print(f'{error_message} of its first two digits.')
        return False
    if not luhn_validation(credit_card_number):
        print(f'{error_message} it fails the Luhn check.')
        return False
    print(f'{credit_card_number} is a valid credit card number.')
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 715
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # `loss` is the mean per-token cross-entropy; multiplying by the label length
        # recovers the summed (negative) log-likelihood of the sequence.
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 294
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"
    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 103
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
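# The _LazyModule indirection above defers the heavy torch/TF imports until an attribute
# is first accessed. A minimal sketch of the idea (simplified, not the actual class):
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, attr_to_module):
#             super().__init__(name)
#             self._attr_to_module = attr_to_module
#         def __getattr__(self, attr):
#             submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#             return getattr(submodule, attr)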
| 447
| 0
|
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
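# Worked recurrence example (illustrative): C(3) = C(0)C(2) + C(1)C(1) + C(2)C(0)
# = 1*2 + 1*1 + 2*1 = 5, matching catalan_numbers(3) == [1, 1, 2, 5].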
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 59
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, """rb""") as fp:
            corpus = pickle.load(fp, encoding="""latin1""")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
        print(f'''Save vocabulary to {pytorch_vocab_dump_path}''')
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("""vocab""", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + """/""" + CORPUS_NAME
        print(f'''Save dataset to {pytorch_dataset_dump_path}''')
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''')
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f'''Building PyTorch model from configuration: {config}''')
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
        with open(pytorch_config_dump_path, """w""", encoding="""utf-8""") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
_lowerCamelCase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 59
| 1
|
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list:
    """Return the prime factorisation of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
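# Worked example (illustrative): prime_factors(360) divides out 2 three times and 3
# twice, then appends the leftover 5, returning [2, 2, 2, 3, 3, 5].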
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11
|
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be turned into `b` by upper-casing some of its lowercase
    letters and deleting all remaining lowercase letters."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
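# Worked example (illustrative): abbr("daBcd", "ABC") is True — drop the first 'd',
# upper-case 'a' and 'c', keep 'B', and drop the trailing 'd' to obtain "ABC".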
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88
| 0
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of `nums` containing no two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
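# Worked example (illustrative): maximum_non_adjacent_sum([1, 2, 3]) == 4 (pick 1 and 3),
# and maximum_non_adjacent_sum([489, 0, -5, 20]) == 509 (pick 489 and 20).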
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id)
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device)
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 678
| 0
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = 'google/mobilebert-uncased'
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts(self, tokenizer):
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""")
        self.assertListEqual(tokens, ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """UNwant\u00E9d,running"""
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = """UNwant\u00E9d,running"""
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz"""), ["""ah""", """\u535A""", """\u63A8""", """zz"""])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """), ["""hello""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""hello"""])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""h\u00E9llo"""])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""hello"""])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo"""), ["""hello"""])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """), ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["""[UNK]"""])
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]"""), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="""[UNK]""")
        self.assertListEqual(tokenizer.tokenize(""""""), [])
        self.assertListEqual(tokenizer.tokenize("""unwanted running"""), ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
        self.assertListEqual(tokenizer.tokenize("""unwantedX running"""), ["""[UNK]""", """runn""", """##ing"""])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]], [["""[UNK]"""], [], ["""[UNK]"""]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]], [["""[UNK]"""], [], ["""[UNK]"""]])
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""")
        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_a = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True)
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, """do_lower_case""") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["""的""", """人""", """有"""]
        text_with_chinese_char = """""".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["""tokenize_chinese_chars"""] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["""tokenize_chinese_chars"""] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 79
|
import argparse
import copy
def generate_neighbours(path ) -> dict:
    """simple docstring"""
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution(path , dict_of_neighbours ) -> tuple:
    """simple docstring"""
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10_000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution , dict_of_neighbours ) -> list:
    """simple docstring"""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search(first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ) -> tuple:
    """simple docstring"""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None ) -> None:
    """simple docstring"""
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
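# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# generate_neighbours() expects a whitespace-separated edge list, one edge per line:
# "<node> <node> <distance>". Node labels must be single characters, because
# generate_first_solution() reads the start node with f.read(1). The graph below
# is made up:
#
#   a b 20
#   a c 18
#   b c 10
#
# With such a file saved as "tsp_data.txt", the search can also be driven
# programmatically instead of through argparse:
#
#   neighbours = generate_neighbours("tsp_data.txt")
#   first_solution, first_distance = generate_first_solution("tsp_data.txt", neighbours)
#   best_solution, best_cost = tabu_search(first_solution, first_distance, neighbours, 5, 3)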
| 387
| 0
|
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
    env_name = """hopper-medium-v2"""
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1_000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
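# Hedged post-processing sketch (added; not part of the original script): the
# collected `rollout` observations can be persisted for offline inspection or
# rendering. The file name is an arbitrary choice.
#
#   import numpy as np
#   np.save("hopper_rollout.npy", np.asarray(rollout))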
| 710
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_vit"""] = ["""ViTFeatureExtractor"""]
    _import_structure["""image_processing_vit"""] = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit"""] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit"""] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_vit"""] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
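# Illustrative note (hypothetical session, added for clarity): with _LazyModule,
# importing the package stays cheap, and a heavy submodule such as modeling_vit
# is only imported when one of its attributes is first accessed.
#
#   from transformers.models import vit   # fast: no torch/TF/Flax import yet
#   model_cls = vit.ViTModel              # first access triggers the torch import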
| 446
| 0
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace ):
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand(BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand(parser: ArgumentParser ):
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' , type=str , help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' , type=str , help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__(self , testing: bool , testing_file: str , path=None , *args ):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self ):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , 'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
        with open(directory + '/configuration.json' , 'r' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(F'''{directory}/configuration.json''' )
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=True )
# Tests require submodules as they have parent imports
with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , 'w' ):
pass
shutil.move(
F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , )
shutil.move(
F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
        def remove_copy_lines(path ):
            with open(path , 'r' ) as f:
                lines = f.readlines()
            with open(path , 'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str , line_to_copy_below: str , lines_to_copy: List[str] ):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh , 'w' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
        replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' )
        os.rmdir(directory )
| 170
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline ):
    def __init__(self , unet , scheduler ):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__(self , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , use_clipped_model_output: Optional[bool] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
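# Hedged usage sketch (added; the checkpoint name is an assumption, not defined
# in this file):
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   images = pipe(batch_size=4, num_inference_steps=50, eta=0.0).images
#   images[0].save("ddim_sample.png")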
| 170
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
    'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure['modeling_maskformer_swin'] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 703
|
"""simple docstring"""
def solution(n: int = 10 ) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError('Invalid input' )
    modulus = 10**n
    number = 28_433 * (pow(2 , 7_830_457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
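# Clarifying note (added): pow(2, 7_830_457, modulus) is Python's built-in
# modular exponentiation, so the ~2.36-million-digit integer 2**7830457 is
# never materialised; only the last n digits are kept at every step.
if __name__ == "__main__":
    assert len(solution(10)) <= 10  # illustrative sanity check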
| 395
| 0
|
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    '''simple docstring'''
    @property
    def dummy_input(self ):
        return self.get_dummy_input()
    @property
    def output_shape(self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
    def get_dummy_input(self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {'hidden_states': hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input['temb'] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
        if include_res_hidden_states_tuple:
            res_generator = torch.manual_seed(1 )
            dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape , generator=res_generator , device=device ),)
        if include_encoder_hidden_states:
            dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32) ).to(device )
        if include_skip_sample:
            dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
        return dummy_input
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            'in_channels': 32,
            'out_channels': 32,
            'temb_channels': 128,
        }
        if self.block_type == "up":
            init_dict['prev_output_channel'] = 32
        if self.block_type == "mid":
            init_dict.pop('out_channels' )
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self , expected_slice ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict )
        if isinstance(output , tuple ):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
    def test_training(self ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )
        if isinstance(output , tuple ):
            output = output[0]
        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
| 573
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'efficientformer'
    def __init__( self , depths: List[int] = [3, 2, 6, 4] , hidden_sizes: List[int] = [48, 96, 224, 448] , downsamples: List[bool] = [True, True, True, True] , dim: int = 448 , key_dim: int = 32 , attention_ratio: int = 4 , resolution: int = 7 , num_hidden_layers: int = 5 , num_attention_heads: int = 8 , mlp_expansion_ratio: int = 4 , hidden_dropout_prob: float = 0.0 , patch_size: int = 16 , num_channels: int = 3 , pool_size: int = 3 , downsample_patch_size: int = 3 , downsample_stride: int = 2 , downsample_pad: int = 1 , drop_path_rate: float = 0.0 , num_meta3d_blocks: int = 1 , distillation: bool = True , use_layer_scale: bool = True , layer_scale_init_value: float = 1E-5 , hidden_act: str = "gelu" , initializer_range: float = 0.02 , layer_norm_eps: float = 1E-12 , image_size: int = 224 , batch_norm_eps: float = 1E-05 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
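# Hedged usage sketch (added for illustration):
#
#   config = EfficientFormerConfig(hidden_act="relu")
#   assert config.num_hidden_layers == 5  # default from the signature above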
| 573
| 1
|
def ugly_numbers(n: int ) -> int:
    ugly_nums = [1]
    i2 , i3 , i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'{ugly_numbers(2_00) = }')
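# Illustrative sanity check (added): the 2-3-5-smooth sequence starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th ugly number is 12.
if __name__ == "__main__":
    assert ugly_numbers(10) == 12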
| 207
|
def optimal_merge_pattern(files: list ) -> float:
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
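# Worked example (added): merging [2, 3, 4] first combines 2 + 3 = 5 (cost 5),
# then 5 + 4 = 9 (cost 9), for a total cost of 14.
if __name__ == "__main__":
    assert optimal_merge_pattern([2, 3, 4]) == 14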
| 207
| 1
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
UpperCAmelCase_ = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
UpperCAmelCase_ = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
UpperCAmelCase_ = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 32
|
from __future__ import annotations
def is_9_pandigital(n: int ) -> bool:
    """simple docstring"""
    s = str(n )
    return len(s ) == 9 and set(s ) == set('''123456789''' )
def solution() -> int | None:
    """simple docstring"""
    for base_num in range(9_999 , 4_999 , -1 ):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
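# Clarifying note (added): the multipliers encode concatenated products. For a
# 4-digit base b, str(b) + str(2 * b) equals b * 10**5 + 2 * b = 100_002 * b,
# and for a 3-digit base, str(b) + str(2 * b) + str(3 * b) equals
# b * 10**6 + 2 * b * 10**3 + 3 * b = 1_002_003 * b.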
if __name__ == "__main__":
print(f'''{solution() = }''')
| 32
| 1
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop" ) -> DataFrame:
    url = F'''https://www.amazon.in/laptop/s?k={product}'''
    header = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    soup = BeautifulSoup(requests.get(url, headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''', attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''}, ), soup.find_all('''div''', attrs={'''class''': '''a-row a-size-base a-color-base'''} ), ):
try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''', attrs={'''class''': '''a-offscreen'''} ).text
            try:
                product_rating = item.find('''span''', attrs={'''class''': '''a-icon-alt'''} ).text
            except AttributeError:
                product_rating = '''Not available'''
            try:
                product_mrp = (
                    '''₹'''
                    + item.find(
                        '''span''', attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
                )
            except AttributeError:
                product_mrp = ''''''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('''₹''' ).replace(''',''', '''''' ) )
                            - float(product_price.strip('''₹''' ).replace(''',''', '''''' ) )
                        )
                        / float(product_mrp.strip('''₹''' ).replace(''',''', '''''' ) )
                    )
                    * 1_00 )
            except ValueError:
                discount = float('''nan''' )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_title = ''' '''
        product_link = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__UpperCAmelCase = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 700
|
def solution(min_total: int = 10**12 ) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F"""{solution() = }""")
| 597
| 0
|
"""simple docstring"""
def equated_monthly_installments(principal: float , rate_per_annum: float , years_to_repay: int ) -> float:
    """simple docstring"""
    if principal <= 0:
        raise Exception("""Principal borrowed must be > 0""" )
    if rate_per_annum < 0:
        raise Exception("""Rate of interest must be >= 0""" )
    if years_to_repay <= 0 or not isinstance(years_to_repay , years_to_repay.__class__ if isinstance(years_to_repay , int ) else int ):
        raise Exception("""Years to repay must be an integer > 0""" )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
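# Worked example (added; the figures are illustrative): a principal of 25_000
# at 8% annual interest over 5 years gives rate_per_month = 0.08 / 12 and 60
# monthly payments, i.e. an EMI of roughly 506.9.
if __name__ == "__main__":
    assert round(equated_monthly_installments(25_000, 0.08, 5), 1) == 506.9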
| 609
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    """simple docstring"""
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        F" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model , hf_model , is_finetuned ):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
                if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """simple docstring"""
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """simple docstring"""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = HubertForCTC(config )
    else:
        hf_wavavec = HubertModel(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
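# Example invocation (added for illustration; the script name and both paths
# below are assumptions, not files shipped with this converter):
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base-converted \
#       --not_finetuned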
| 609
| 1
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowercase__ = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results ):
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def extract_first_line_failure(failures_short_lines ):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(R'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    '''simple docstring'''
    def __init__(self , title: str , doc_test_results: Dict ):
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',' )[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self ) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3_600 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
        return f"""{int(hours )}h{int(minutes )}m{int(seconds )}s"""
@property
    def header(self ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
    def failures(self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
    def category_failures(self ) -> Dict:
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ''
for category, failures in category_failures.items():
if len(lowercase_ ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(lowercase_ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
    def payload(self ) -> str:
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(lowercase_ )
@staticmethod
    def error_out() -> None:
        payload = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print('Sending the following payload' )
        print(json.dumps({'blocks': payload} ) )
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=payload , )
    def post(self ) -> None:
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )
        text = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=text , )
    def get_reply_blocks(self , job_name , job_link , failures , text ):
        failures_text = ''
        for key, value in failures.items():
            value = value[:200] + ' [Truncated]' if len(value ) > 250 else value
            failures_text += f"""*{key}*\n_{value}_\n\n"""
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self ):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )
        job_link = self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                text = f"""*Num failures* :{len(job_result["failed"] )} \n"""
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f"""Results for {job}""" , blocks=blocks , thread_ts=self.thread_ts['ts'] , )
                time.sleep(1 )
def get_job_links():
    run_id = os.environ['GITHUB_RUN_ID']
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 1_00) / 1_00 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"""&page={i + 2}""" ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.' , e )
        return {}
def retrieve_artifact(name ):
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F"""Could not open {os.path.join(name , file )}.""" ) from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        '''simple docstring'''
        def __init__(self , name: str ):
            self.name = name
            self.paths = []
        def __str__(self ):
            return self.name
        def add_path(self , path: str ):
            self.paths.append({'name': self.name, 'path': path} )
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "
        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 695
|
"""
Project Euler Problem 57: https://projecteuler.net/problem=57

Count how many of the first `n` expansions of the continued fraction of the
square root of two have a numerator with more digits than the denominator.
"""


def solution(n: int = 1000) -> int:
    # Successive convergents of sqrt(2) satisfy:
    #   numerator'   = numerator + 2 * denominator
    #   denominator' = numerator + denominator
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
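# Worked example: the eighth expansion is 1393/985, the first whose numerator
# has more digits than its denominator, so solution(8) == 1.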
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695
| 1
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
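# For example, floats_list((2, 4)) returns two inner lists of four floats each,
# drawn uniformly from [0, scale).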
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 156
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 156
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 717
|
"""
Return the maximum possible sum over all non-empty subsequences of a given
sequence of integers.
"""
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """
    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
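# Note: a "subsequence" here need not be contiguous, so taking
# max(ans, ans + num, num) at every element reduces to summing the positive
# entries (or keeping the single largest element when all are negative).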
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCAmelCase__ : str = int(input("""Enter number of elements : """).strip())
lowerCAmelCase__ : Dict = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 502
| 0
|
def sum_of_digits(n: int) -> int:
    """
    Find the sum of digits of a number.

    >>> sum_of_digits(12345)
    15
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """
    Find the sum of digits of a number using recursion.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of digits of a number.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """
    Benchmark the three implementations on ints of increasing length.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 246
|
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists; the source
        vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run a breadth first search from the source vertex, recording each
        vertex's parent in the resulting breadth first tree."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to `target_vertex` as a
        "->"-joined string, raising ValueError when no path exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
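    # With the sample graph above this prints "G->C->A->B->D", then "G", and
    # the last call raises ValueError because "Foo" is unreachable from "G".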
| 246
| 1
|
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
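# Illustrative usage (the placeholder string below is made up): after
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
# any occurrence of "<cat-toy>" in text passed to __call__/encode is expanded
# to "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3" (optionally shuffled).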
| 710
|
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    """Find the sum of digits of a number."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of digits of a number using recursion."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of digits of a number."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 275
| 0
|
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    # Runs any attached accelerate `_hf_hook.pre_forward` before the wrapped
    # method, so offloaded weights are moved to the execution device first.
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
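# Illustrative usage (class/method names below are made up, not from this
# module):
#
#   class ToyAutoencoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           ...
#
# With accelerate's CPU offload enabled, weights are moved to the execution
# device before `encode` runs, just as they would be for `forward`.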
| 107
|
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the wrapped optimizer(s) actually performed an
    optimization step, e.g. to avoid advancing the schedule on mixed-precision steps that were skipped because of
    gradient overflow.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
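# Illustrative usage (normally `Accelerator.prepare` builds this wrapper
# automatically; the names below are placeholders):
#
#   scheduler = AcceleratedScheduler(lr_scheduler, optimizer)
#   scheduler.step()  # only advances when the optimizer actually stepped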
| 289
| 0
|
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
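# Example invocation (all paths and names below are placeholders):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_dump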
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
lowerCAmelCase__ = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 648
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 648
| 1
|
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers library for this model
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 96
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_xla_generate_fast(self):
        pass

    def test_generate_with_headmasking(self):
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 271
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__snake_case : Any = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 365
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case : Tuple = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = ["""GLPNFeatureExtractor"""]
__snake_case : Optional[Any] = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
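# A minimal sketch (our assumptions, not the real `transformers.utils` implementation)
# of the deferred-import idea behind `_LazyModule`: submodules are imported only on
# first attribute access, which keeps top-level package imports fast. The class and
# names below are illustrative only.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._symbol_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value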
| 365
| 1
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A menu that the user can navigate through with the arrow keys."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Should not be called directly; moves the cursor either up or down."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
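# Hedged usage sketch (not part of the original file; the prompt and choice
# strings are illustrative): in an interactive terminal, the menu lets the user
# pick an entry with the arrow or number keys and returns the selected index.
#
# menu = BulletMenu("Choose a backend:", ["cpu", "cuda", "mps"])
# selected_index = menu.run(default_choice=0)
# print(menu.choices[selected_index])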
| 54
|
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
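# Hedged usage sketch (the paths below are placeholders, not from the original
# script); the function can also be called directly instead of via the CLI:
# convert_tf_checkpoint_to_pytorch(
#     tf_checkpoint_path="/path/to/t5/model.ckpt",
#     config_file="/path/to/t5/config.json",
#     pytorch_dump_path="/path/to/output",
# )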
| 527
| 0
|
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : Any = LxmertTokenizer
_lowerCAmelCase : Union[str, Any] = LxmertTokenizerFast
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : List[str] = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 485
|
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
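# Hedged usage sketch (model name and threshold are illustrative): in practice
# these helpers are driven by `from_pretrained`, which calls
# `replace_with_bnb_linear` and `set_module_quantized_tensor_to_device`
# internally when a quantization config is passed.
#
# from transformers import AutoModelForCausalLM, BitsAndBytesConfig
# bnb_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)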
| 485
| 1
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
| 484
|
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__('doctest').testmod()
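# Worked example: removing one digit from 152 yields 52, 12 or 15, so the
# function returns 52; the sign is dropped via abs(), so -5896 gives 896.
# remove_digit(152)    # -> 52
# remove_digit(-5896)  # -> 896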
| 226
| 0
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 716
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
    'processing_mgp_str': ['MgpstrProcessor'],
    'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 458
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 662
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
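# The quantity computed above is the Rayleigh quotient; in LaTeX, for a
# Hermitian matrix A and a nonzero vector v:
#
#     R(A, v) = \frac{v^{*} A v}{v^{*} v}
#
# For Hermitian A the quotient is real and bounded by the extreme eigenvalues,
# \lambda_{\min}(A) \le R(A, v) \le \lambda_{\max}(A). For the second test
# matrix above, v^{*} A v = 42 and v^{*} v = 14, giving R = 3.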
| 662
| 1
|
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Compute num! recursively, memoized with functools.lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
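# Hedged usage sketch: because of `lru_cache`, each value is computed once and
# reused, so repeated calls are O(1) after the first.
# factorial(5)  # -> 120 (computes and caches 0! through 5!)
# factorial(6)  # -> 720 (a single multiplication on top of the cached 5!)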
| 716
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence containing only the values 0, 1 and 2 in a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
_A = input("Enter numbers separated by commas:\n").strip()
_A = [int(item.strip()) for item in user_input.split(",")]
print(f"{dutch_national_flag_sort(unsorted)}")
| 294
| 0
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end times and duration (in minutes) from a single workflow job."""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"""{k}: {v["duration"]}""")
| 411
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image, ).images
        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 588
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [96, 192, 384, 768],
            'num_groups': 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DPT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas')
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas').to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 703
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 505
| 0
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
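

# A short usage sketch for the two helpers above. The dataset names are placeholders;
# any two map-style datasets with compatible features would do.
#
#     from datasets import load_dataset
#
#     d1 = load_dataset("dataset_a", split="train")
#     d2 = load_dataset("dataset_b", split="train")
#     mixed = interleave_datasets([d1, d2], probabilities=[0.7, 0.3], seed=42)
#     stacked = concatenate_datasets([d1, d2])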
def equated_monthly_installments(principal, rate_per_annum, years_to_repay):
    """Calculate the fixed monthly payment (EMI) for a loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
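
# Worked example for the formula above (figure is approximate, computed by hand):
# equated_monthly_installments(25000, 0.08, 15) -> about 238.91 per month, i.e. a
# monthly rate of 0.08 / 12 applied over 15 * 12 = 180 payments.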
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter object using a name and optional aliases to reserve them."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'"
        )
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
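
# Typical invocation (the training script and its flags are placeholders for whatever
# your script expects; only --num_cores belongs to the launcher itself):
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The launcher imports the script as a module and spawns its _mp_fn on every TPU core.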
def compute_ap(l):  # noqa: E741
    """Find and print the articulation points (cut vertices) of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
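# For the sample graph above, the articulation points printed are 2, 3 and 5:
# removing 2 separates {0, 1} from the rest, removing 3 isolates 4, and removing 5
# cuts off the cycle {6, 7, 8}.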
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Implement sin using its Taylor series expansion."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("""doctest""").testmod()
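
# Quick sanity checks for the Taylor-series sin above (they agree with math.sin up to
# the rounding count): sin(0) -> 0.0, sin(90) -> 1.0, sin(270) -> -1.0.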
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_lowerCamelCase = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
_lowerCamelCase = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
_lowerCamelCase = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
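

# precision_at_10 treats each row of the cosine-similarity matrix as a retrieval
# query: English sentence vector i should retrieve Indian-language sentence vector i,
# and the score is the fraction of rows whose true index lands in the 10 nearest
# neighbours.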
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format="numpy" if self.config_name != "cvit-mkb-clsr" else None ,)
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_snake_case ,_snake_case )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, **kwargs):
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images,
        do_rescale=None,
        rescale_factor=None,
        do_pad=None,
        pad_size=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
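

# Usage sketch for the processor above (the random array stands in for a real image):
#
#     import numpy as np
#
#     processor = Swin2SRImageProcessor()
#     image = np.random.randint(0, 256, size=(3, 30, 41), dtype=np.uint8)
#     batch = processor(images=image, return_tensors="np")
#     # pixel_values are rescaled to [0, 1] and symmetric-padded so height and width
#     # become multiples of pad_size: here 30 x 41 -> 32 x 48.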
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
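
# The check letter is LOOKUP_LETTERS[number % 23]. For example, 12345678 % 23 == 14
# and LOOKUP_LETTERS[14] == "Z", so is_spain_national_id("12345678Z") returns True.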
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
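# t_n = n * (n + 1) / 2, so the list covers every triangular number up to
# t_100 = 5050, well above the word values that occur in the puzzle's word list.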
def solution():
    """
    Count the "triangle words" in words.txt: words whose value (the sum of their
    letters' alphabet positions) is a triangular number.
    """
    script_directory = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_directory, "words.txt")
    words = ""

    with open(wordfile_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2

        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2