import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
"""simple docstring"""
from math import factorial
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Optional[int] , snake_case: Dict , snake_case: int ) -> Tuple:
snake_case_ :List[Any] = real
if isinstance(snake_case , snake_case ):
snake_case_ :Tuple = [1] * rank
else:
snake_case_ :Optional[Any] = rank
def __repr__( self: List[str] ) -> Tuple:
return (
f"""{self.real}+"""
f"""{'+'.join(str(snake_case )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
snake_case_ :Any = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , snake_case )
def __add__( self: Optional[int] , snake_case: Dict ) -> List[str]:
if not isinstance(snake_case , snake_case ):
return Dual(self.real + other , self.duals )
snake_case_ :List[Any] = self.duals.copy()
snake_case_ :Tuple = other.duals.copy()
if len(snake_case ) > len(snake_case ):
o_dual.extend([1] * (len(snake_case ) - len(snake_case )) )
elif len(snake_case ) < len(snake_case ):
s_dual.extend([1] * (len(snake_case ) - len(snake_case )) )
snake_case_ :Dict = []
for i in range(len(snake_case ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , snake_case )
_A : str = __add__
def __sub__( self: Tuple , snake_case: Union[str, Any] ) -> Tuple:
return self + other * -1
def __mul__( self: str , snake_case: Tuple ) -> Optional[Any]:
if not isinstance(snake_case , snake_case ):
snake_case_ :Dict = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , snake_case )
snake_case_ :int = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , snake_case )
_A : int = __mul__
def __truediv__( self: List[str] , snake_case: List[str] ) -> List[str]:
if not isinstance(snake_case , snake_case ):
snake_case_ :Optional[Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , snake_case )
raise ValueError
def __floordiv__( self: int , snake_case: List[Any] ) -> Any:
if not isinstance(snake_case , snake_case ):
snake_case_ :Optional[int] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , snake_case )
raise ValueError
def __pow__( self: Optional[Any] , snake_case: Optional[int] ) -> List[Any]:
if n < 0 or isinstance(snake_case , snake_case ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
snake_case_ :str = self
for _ in range(n - 1 ):
x *= self
return x
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
if not callable(_lowercase ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(_lowercase, (float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(_lowercase, _lowercase ):
raise ValueError("""differentiate() requires an int as input for order""" )
snake_case_ :Optional[Any] = Dual(_lowercase, 1 )
snake_case_ :List[Any] = func(_lowercase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def A_ ( _lowercase ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
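
    # Worked check (added for illustration, not in the original script):
    # f(y) = y**2 * y**4 = y**6, so f''(y) = 30 * y**4 and f''(9) = 30 * 6561 = 196830.
    # The dual-number machinery returns the same value via result.duals[1] * 2!.
    assert differentiate(f, 9, 2) == 30 * 9**4 == 196830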
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
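
# Usage sketch (added for illustration; not part of the original module). It shows
# how the ONNX export tooling queries this config for dynamic axes and dummy inputs:
#
#     from transformers import AutoTokenizer
#
#     config = BlenderbotSmallConfig()
#     onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
#     print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     dummy = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#     )
#     print(sorted(dummy.keys()))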
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
    author    = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
    title     = {Unbabel's Participation in the WMT20 Metrics Shared Task},
    booktitle = {Proceedings of the Fifth Conference on Machine Translation},
    month     = {November},
    year      = {2020},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    pages     = {909--918},
}
@inproceedings{rei-etal-2020-comet,
    title = "{COMET}: A Neural Framework for {MT} Evaluation",
    author = "Rei, Ricardo and
      Stewart, Craig and
      Farinha, Ana C and
      Lavie, Alon",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
    pages = "2685--2702",
}
"""

_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that year's competition.

See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.

Examples:

    >>> comet_metric = datasets.load_metric('comet')
    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da')  # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __snake_case :
def __init__( self , lowercase , ) -> Any:
'''simple docstring'''
a__: Optional[Any] = parent
a__: Optional[int] = 13
a__: Dict = 7
a__: Union[str, Any] = 30
a__: Any = self.seq_length + self.mem_len
a__: List[Any] = 15
a__: Any = True
a__: int = True
a__: List[str] = 99
a__: Optional[Any] = [10, 50, 80]
a__: Optional[int] = 32
a__: Optional[int] = 32
a__: List[Any] = 4
a__: Optional[Any] = 8
a__: Tuple = 1_28
a__: Dict = 2
a__: List[str] = 2
a__: Tuple = None
a__: Dict = 1
a__: List[Any] = 0
a__: List[Any] = 3
a__: List[Any] = self.vocab_size - 1
a__: Dict = 0.01
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__: Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__: List[Any] = None
if self.use_labels:
a__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__: Optional[Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
random.seed(self.seed)
tf.random.set_seed(self.seed)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
a__: Tuple = TFTransfoXLModel(__A)
a__: Optional[int] = model(__A).to_tuple()
a__: List[str] = {'''input_ids''': input_ids_a, '''mems''': mems_a}
a__: Optional[int] = model(__A).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__: List[str] = TFTransfoXLLMHeadModel(__A)
a__: int = model(__A).to_tuple()
a__: int = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
a__: Optional[int] = model(__A).to_tuple()
a__: List[Any] = model([input_ids_a, mems_a]).to_tuple()
a__: Optional[Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
a__: Any = model(__A).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__: List[str] = TFTransfoXLForSequenceClassification(__A)
a__: List[str] = model(__A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Union[str, Any] = self.prepare_config_and_inputs()
(a__): List[Any] = config_and_inputs
a__: Union[str, Any] = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
a__ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
a__ = () if is_tf_available() else ()
a__ = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Any = TFTransfoXLModelTester(self)
a__: Dict = ConfigTester(self , config_class=__A , d_embed=37)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
self.model_tester.set_seed()
a__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__A)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
self.model_tester.set_seed()
a__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__A)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__A)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
a__: Dict = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
a__: Optional[Any] = model_class(__A)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class in list_other_models_with_output_ebd:
a__: Optional[int] = model.get_output_embeddings()
assert isinstance(__A , tf.keras.layers.Layer)
a__: Dict = model.get_bias()
assert name is None
else:
a__: Tuple = model.get_output_embeddings()
assert x is None
a__: str = model.get_bias()
assert name is None
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
pass
@slow
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Tuple = TFTransfoXLModel.from_pretrained(__A)
self.assertIsNotNone(__A)
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.')
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
pass
@require_tf
class __snake_case ( unittest.TestCase ):
@unittest.skip('Skip test until #12651 is resolved.')
@slow
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: str = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
# fmt: off
a__: Dict = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
a__: Dict = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
a__: Optional[Any] = model.generate(__A , max_length=2_00 , do_sample=__A)
self.assertListEqual(output_ids[0].numpy().tolist() , __A)
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")

        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT weights into our BEiT structure.
    """
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
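
# Example invocation (added for illustration; the output folder is a placeholder
# and the script name is assumed from its location in the transformers repo):
#
#     python convert_dit_unilm_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base
#
# This is equivalent to calling convert_dit_checkpoint(checkpoint_url, "./dit-base").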
import argparse
import os
from pathlib import Path

import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download

from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
    BarkCoarseConfig,
    BarkConfig,
    BarkFineConfig,
    BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
    BarkCoarseGenerationConfig,
    BarkFineGenerationConfig,
    BarkGenerationConfig,
    BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)


new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)


def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])

            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict

    return model


def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
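
# Example invocation (added for illustration; the dump folder is a placeholder and
# the script name is assumed from its location in the transformers repo):
#
#     python convert_suno_to_hf.py text ./bark-text --is_small
#
# This downloads suno/bark's text.pt (the small semantic checkpoint) into CACHE_DIR
# if it is not already there, checks the converted weights against the original
# Bark implementation, and saves the HF model under ./bark-text.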
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = """vit_msn"""
def __init__( self , lowerCAmelCase__=768 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=3_072 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1e-06 , lowerCAmelCase__=224 , lowerCAmelCase__=16 , lowerCAmelCase__=3 , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> Union[str, Any]:
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = qkv_bias
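
# Usage sketch (added for illustration; follows the standard config/model pattern):
#
#     from transformers import ViTMSNConfig, ViTMSNModel
#
#     # Initializing a ViT MSN vit-msn-base style configuration
#     configuration = ViTMSNConfig()
#
#     # Initializing a model from the vit-msn-base style configuration
#     model = ViTMSNModel(configuration)
#
#     # Accessing the model configuration
#     configuration = model.config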
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowercase (SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
SCREAMING_SNAKE_CASE = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
SCREAMING_SNAKE_CASE = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
SCREAMING_SNAKE_CASE = F'{src_lang}-{tgt_lang}'
SCREAMING_SNAKE_CASE = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(model_card_dir , exist_ok=True )
path = os.path.join(model_card_dir , 'README.md' )
print(F'Generating {path}' )
with open(path , 'w' , encoding='utf-8' ) as f:
    f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 113 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
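
# Usage sketch (illustrative, not part of the original tests): floats_list
# builds a nested list of random floats in [0, scale), standing in for raw
# waveform data in the tests below.
if __name__ == "__main__":
    _example = floats_list((2, 3), scale=0.5)
    assert len(_example) == 2 and len(_example[0]) == 3
    assert all(0.0 <= v < 0.5 for row in _example for v in row)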
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1E-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors='''np''').input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='''np''').input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='''np''').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='''np''').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ['''longest''', '''max_length''', '''do_not_pad''']
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ['''longest''', '''max_length''', '''do_not_pad''']
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors='''np''', return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1E-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='''max_length''', max_length=4, truncation=True, return_tensors='''np''', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='''longest''', max_length=4, truncation=True, return_tensors='''np''', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='''longest''', max_length=16, truncation=True, return_tensors='''np''', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''np''')
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''pt''')
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''')
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''').select(range(num_samples))[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors='''pt''').input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1E-4))
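
# Standalone usage sketch (illustrative; not part of the original test suite).
# The checkpoint name is an assumption: any Speech2Text checkpoint would do.
if __name__ == "__main__":
    extractor = SpeechaTextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
    waves = [np.zeros(16_000, dtype=np.float32), np.zeros(8_000, dtype=np.float32)]
    batch = extractor(waves, sampling_rate=16_000, padding=True, return_attention_mask=True, return_tensors="np")
    print(batch.input_features.shape)  # (batch, frames, num_mel_bins)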
| 359 |
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key character by character until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
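
# Worked example of the key expansion above (illustrative): "SECRET" is
# cycled until it covers the 17 characters of "THE GERMAN ATTACK".
assert generate_key("THE GERMAN ATTACK", "SECRET") == "SECRETSECRETSECRE"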
def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the expanded key; spaces pass through unchanged."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt by adding the key indices back."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(F"""Encrypted Text = {s}""")
    print(F"""Original Text = {original_text(s, key_new)}""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 33 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, 'models/bert/'))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/transformers/models/bert/modeling_bert.py'),
            os.path.join(self.transformer_dir, 'models/bert/modeling_bert.py'),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = 'src/transformers'
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertEqual(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE + '\n', )

        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE, )

        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', re.sub('Bert', 'TestModel', REFERENCE_CODE), )

        # Copy consistency with a really long name
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}', F'{long_class_name}LMPredictionHead', re.sub('Bert', long_class_name, REFERENCE_CODE), )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', REFERENCE_CODE, overwrite_result=re.sub('Bert', 'TestModel', REFERENCE_CODE), )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['format_model_list'])

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme['format_model_list'])

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['format_model_list'])

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
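
# Illustrative sketch of the utility these tests exercise: scan a file for
# "# Copied from" markers and report blocks that drifted from their source.
# The path below is a placeholder.
if __name__ == "__main__":
    inconsistencies = check_copies.is_copy_consistent("src/transformers/models/bert/modeling_bert.py")
    print(inconsistencies)  # [] when every copied block still matches its origin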
| 220 |
def harmonic_series(n_term: str) -> list:
    """Generate the harmonic series up to the n-th term: 1, 1/2, 1/3, ..., 1/n."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
    print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
    print(harmonic_series(nth_term))
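    # Illustrative check: the series is built as string terms, so for n = 4 we
    # expect ['1', '1/2', '1/3', '1/4'].
    assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]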
| 22 | 0 |
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit of `number` (0 if none)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
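    # Illustrative checks: 8 == 0b1000, so its highest set bit sits at
    # position 4 (1-indexed); 0 has no set bit at all.
    assert get_highest_set_bit_position(1) == 1
    assert get_highest_set_bit_position(8) == 4
    assert get_highest_set_bit_position(0) == 0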
| 352 |
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: explore the square ending at every cell (exponential time)."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Memoized recursion: cache each sub-problem in dp_array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over an auxiliary (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same DP, but only two rows of the table are kept in memory."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row.copy()  # snapshot this row before moving up
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
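    # Cross-check sketch (illustrative): all four implementations should agree.
    # The bottom-up variants run in O(rows * cols) time; the last one also uses
    # only O(cols) extra space.
    mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert all(
        func(3, 3, mat) == 2
        for func in (
            largest_square_area_in_matrix_top_down,
            largest_square_area_in_matrix_top_down_with_dp,
            largest_square_area_in_matrix_bottom_up,
            largest_square_area_in_matrix_bottom_up_space_optimization,
        )
    )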
| 287 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ['''col_1''', '''col_2'''])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {'''col_1''': 1})
        self.assertDictEqual(dset[1], {'''col_1''': None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features['''col_1'''], Sequence(Value('''int64''')))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
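
# Minimal usage sketch of the API under test (illustrative, not a test case):
if __name__ == "__main__":
    records = [{"col_1": 1, "col_2": "x"}, {"col_1": 2, "col_2": "y"}]
    dset = Dataset.from_list(records)
    print(dset.column_names)  # ['col_1', 'col_2']
    print(dset[0])  # {'col_1': 1, 'col_2': 'x'}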
| 51 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
extra_arch = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = ''' Hello world! cécé herlolip'''

mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location='cpu')
    hub_interface = torch.hub.load('pytorch/fairseq', 'bart.large.cnn').eval()
    hub_interface.model.load_state_dict(sd['model'])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak a fairseq BART checkpoint into the transformers structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load('pytorch/fairseq', checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace('.', '-')
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors='pt').unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            F"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}""")

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict['model.shared.weight'] = state_dict['model.decoder.embed_tokens.weight']
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict('mnli', tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, 'lm_head'):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            F"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
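    # Example invocations (script name and paths are placeholders, shown for
    # illustration; they follow the argparse definition above):
    #   python convert_bart_checkpoint.py bart.large ./bart-large-converted
    #   python convert_bart_checkpoint.py bart_xsum/model.pt ./bart-xsum-converted --hf_config facebook/bart-large-xsum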
| 119 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list): [min, max] range for the shortest edge after resize.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = '''bilinear'''
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(), )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
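
# Illustrative use of _scale_box/_clip_box (assumes the definitions above):
# clamp a box in place to an image of height 480 and width 640.
if __name__ == "__main__":
    boxes = torch.tensor([[-5.0, 10.0, 700.0, 500.0]])
    _clip_box(boxes, (480, 640))
    print(boxes)  # tensor([[  0.,  10., 640., 480.]])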
| 361 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_lowerCamelCase : Any = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_lowerCamelCase : Tuple = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_lowerCamelCase : List[str] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')

    if not keep_singletons:
        logger.info(
            f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
            '''files, respectively''')

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': f1})

        logger.info(
            name.ljust(10), f'''Recall: {recall * 100:.2f}''', f''' Precision: {precision * 100:.2f}''', f''' F1: {f1 * 100:.2f}''', )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f'''CoNLL score: {conll:.2f}''')
        output_scores.update({'''conll_score''': conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#'''):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ), codebase_urls=['''https://github.com/ns-moosavi/coval'''], reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
], )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        all_metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=all_metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )

        return score
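
# For reference: the CoNLL score surfaced above is just the mean of the MUC,
# B-cubed and CEAFe F1 values scaled to a percentage, mirroring the logic in
# evaluate(). A minimal restatement for clarity:
def _conll_average(muc_f1: float, bcub_f1: float, ceafe_f1: float) -> float:
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100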
| 99 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNetaDConditionModel(nn.Module, ConfigMixin, FlaxModelMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                """At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""")

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
def __call__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__ = True, lowerCamelCase__ = False, ):
# 1. time
if not isinstance(_SCREAMING_SNAKE_CASE, jnp.ndarray ):
A : Dict = jnp.array([timesteps], dtype=jnp.intaa )
elif isinstance(_SCREAMING_SNAKE_CASE, jnp.ndarray ) and len(timesteps.shape ) == 0:
A : Tuple = timesteps.astype(dtype=jnp.floataa )
A : List[str] = jnp.expand_dims(_SCREAMING_SNAKE_CASE, 0 )
A : int = self.time_proj(_SCREAMING_SNAKE_CASE )
A : int = self.time_embedding(_SCREAMING_SNAKE_CASE )
# 2. pre-process
A : Union[str, Any] = jnp.transpose(_SCREAMING_SNAKE_CASE, (0, 2, 3, 1) )
A : Tuple = self.conv_in(_SCREAMING_SNAKE_CASE )
# 3. down
A : Dict = (sample,)
for down_block in self.down_blocks:
if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ):
A : Union[str, Any] = down_block(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, deterministic=not train )
else:
A : Any = down_block(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
A : List[str] = ()
for down_block_res_sample, down_block_additional_residual in zip(
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
A : Dict = new_down_block_res_samples
# 4. mid
A : Dict = self.mid_block(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
A : Any = down_block_res_samples[-(self.layers_per_block + 1) :]
A : Union[str, Any] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ):
A : int = up_block(
_SCREAMING_SNAKE_CASE, temb=_SCREAMING_SNAKE_CASE, encoder_hidden_states=_SCREAMING_SNAKE_CASE, res_hidden_states_tuple=_SCREAMING_SNAKE_CASE, deterministic=not train, )
else:
A : int = up_block(_SCREAMING_SNAKE_CASE, temb=_SCREAMING_SNAKE_CASE, res_hidden_states_tuple=_SCREAMING_SNAKE_CASE, deterministic=not train )
# 6. post-process
A : Tuple = self.conv_norm_out(_SCREAMING_SNAKE_CASE )
A : Optional[Any] = nn.silu(_SCREAMING_SNAKE_CASE )
A : str = self.conv_out(_SCREAMING_SNAKE_CASE )
A : str = jnp.transpose(_SCREAMING_SNAKE_CASE, (0, 3, 1, 2) )
if not return_dict:
return (sample,)
        return FlaxUNet2DConditionOutput(sample=_SCREAMING_SNAKE_CASE )
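# Hypothetical usage sketch (not in the original; the variable `model` and the
# class name are assumptions, since this excerpt omits the class header -- in
# diffusers the matching module is FlaxUNet2DConditionModel):
#   import jax
#   import jax.numpy as jnp
#   rng = jax.random.PRNGKey(0)
#   sample = jnp.zeros((1, 4, 64, 64), dtype=jnp.float32)
#   timesteps = jnp.ones((1,), dtype=jnp.int32)
#   encoder_hidden_states = jnp.zeros((1, 77, 768), dtype=jnp.float32)
#   params = model.init(rng, sample, timesteps, encoder_hidden_states)
#   out = model.apply(params, sample, timesteps, encoder_hidden_states)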
| 116 |
def remove_duplicates(key: str) -> str:
    """simple docstring"""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    """simple docstring"""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """simple docstring"""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    """simple docstring"""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D: ").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 131 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
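# Shape check (not in the original): a single RGB PIL image resized to (w, h)
# comes back as a (1, 3, h, w) float32 torch tensor scaled to [-1, 1].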
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False  # fixed: flag must exist when inputs are already numpy
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
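# Sanity check (not in the original): for unit vectors at angle theta this
# loss equals theta**2 / 2, e.g. orthogonal unit vectors give (pi/2)**2 / 2:
# >>> import math
# >>> x = torch.tensor([[1.0, 0.0]]); y = torch.tensor([[0.0, 1.0]])
# >>> bool(torch.isclose(spherical_dist_loss(x, y)[0], torch.tensor(math.pi**2 / 8)))
# True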
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ) -> Optional[Any]:
super().__init__()
self.register_modules(
vae=UpperCAmelCase , text_encoder=UpperCAmelCase , clip_model=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase , feature_extractor=UpperCAmelCase , coca_model=UpperCAmelCase , coca_tokenizer=UpperCAmelCase , coca_transform=UpperCAmelCase , )
__a = (
feature_extractor.size
if isinstance(feature_extractor.size , UpperCAmelCase )
else feature_extractor.size['shortest_edge']
)
__a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , UpperCAmelCase )
set_requires_grad(self.clip_model , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase = "auto" ) -> Optional[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
self.enable_attention_slicing(UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
set_requires_grad(self.vae , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
set_requires_grad(self.vae , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
set_requires_grad(self.unet , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
set_requires_grad(self.unet , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
# get the original timestep using init_timestep
__a = min(int(num_inference_steps * strength ) , UpperCAmelCase )
__a = max(num_inference_steps - init_timestep , 0 )
__a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) -> Optional[int]:
if not isinstance(UpperCAmelCase , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase )}''' )
__a = image.to(device=UpperCAmelCase , dtype=UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase )
]
__a = torch.cat(UpperCAmelCase , dim=0 )
else:
__a = self.vae.encode(UpperCAmelCase ).latent_dist.sample(UpperCAmelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 0.18_215 * init_latents
__a = init_latents.repeat_interleave(UpperCAmelCase , dim=0 )
__a = randn_tensor(init_latents.shape , generator=UpperCAmelCase , device=UpperCAmelCase , dtype=UpperCAmelCase )
# get latents
__a = self.scheduler.add_noise(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__a = init_latents
return latents
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Tuple:
__a = self.coca_transform(UpperCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
__a = self.feature_extractor.preprocess(UpperCAmelCase )
__a = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
__a = self.clip_model.get_image_features(UpperCAmelCase )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase )
__a = image_embeddings_clip.repeat_interleave(UpperCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Tuple:
__a = latents.detach().requires_grad_()
__a = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# predict the noise residual
__a = self.unet(UpperCAmelCase , UpperCAmelCase , encoder_hidden_states=UpperCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__a = self.scheduler.alphas_cumprod[timestep]
__a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__a = torch.sqrt(UpperCAmelCase )
__a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , UpperCAmelCase ):
__a = self.scheduler.sigmas[index]
__a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.18_215 * sample
__a = self.vae.decode(UpperCAmelCase ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase )
__a = self.normalize(UpperCAmelCase ).to(latents.dtype )
__a = self.clip_model.get_image_features(UpperCAmelCase )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase )
__a = spherical_dist_loss(UpperCAmelCase , UpperCAmelCase ).mean() * clip_guidance_scale
__a = -torch.autograd.grad(UpperCAmelCase , UpperCAmelCase )[0]
if isinstance(self.scheduler , UpperCAmelCase ):
__a = latents.detach() + grads * (sigma**2)
__a = noise_pred_original
else:
__a = noise_pred_original - torch.sqrt(UpperCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 5_1_2 , UpperCAmelCase = 5_1_2 , UpperCAmelCase = 0.6 , UpperCAmelCase = 5_0 , UpperCAmelCase = 7.5 , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 1_0_0 , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = 0.8 , UpperCAmelCase = 0.1 , UpperCAmelCase = 0.1 , ) -> Union[str, Any]:
if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(UpperCAmelCase )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(UpperCAmelCase , torch.Generator ) and batch_size > 1:
__a = [generator] + [None] * (batch_size - 1)
__a = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
__a = [x[0] for x in coca_is_none if x[1]]
__a = ', '.join(UpperCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(UpperCAmelCase )
if style_prompt is None:
if len(UpperCAmelCase ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(UpperCAmelCase )
# get prompt text embeddings for content and style
__a = self.tokenizer(
UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase , return_tensors='pt' , )
__a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__a = self.tokenizer(
UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase , return_tensors='pt' , )
__a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__a = slerp(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
__a = text_embeddings.repeat_interleave(UpperCAmelCase , dim=0 )
# set timesteps
__a = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__a = {}
if accepts_offset:
__a = 1
self.scheduler.set_timesteps(UpperCAmelCase , **UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__a , __a = self.get_timesteps(UpperCAmelCase , UpperCAmelCase , self.device )
__a = timesteps[:1].repeat(UpperCAmelCase )
# Preprocess image
__a = preprocess(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__a = self.prepare_latents(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , text_embeddings.dtype , self.device , UpperCAmelCase )
__a = preprocess(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__a = self.prepare_latents(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , text_embeddings.dtype , self.device , UpperCAmelCase )
__a = slerp(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if clip_guidance_scale > 0:
__a = self.get_clip_image_embeddings(UpperCAmelCase , UpperCAmelCase )
__a = self.get_clip_image_embeddings(UpperCAmelCase , UpperCAmelCase )
__a = slerp(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a = content_text_input.input_ids.shape[-1]
__a = self.tokenizer([''] , padding='max_length' , max_length=UpperCAmelCase , return_tensors='pt' )
__a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__a = uncond_embeddings.repeat_interleave(UpperCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__a = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device='cpu' , dtype=UpperCAmelCase ).to(
self.device )
else:
__a = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler and will be ignored by other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
# check if the scheduler accepts generator
__a = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__a = generator
with self.progress_bar(total=UpperCAmelCase ):
for i, t in enumerate(UpperCAmelCase ):
# expand the latents if we are doing classifier free guidance
__a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# predict the noise residual
__a = self.unet(UpperCAmelCase , UpperCAmelCase , encoder_hidden_states=UpperCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__a , __a = noise_pred.chunk(2 )
__a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__a , __a = self.cond_fn(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.18_215 * latents
__a = self.vae.decode(UpperCAmelCase ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase , nsfw_content_detected=UpperCAmelCase )
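# Hypothetical usage sketch (not in the original; the checkpoint id, the
# custom_pipeline name, and the positional call layout are all assumptions):
# community pipelines of this shape are usually loaded through
# DiffusionPipeline and driven with a content image plus a style image.
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="clip_guided_images_mixing_stable_diffusion",
#   )
#   result = pipe(content_image, style_image, num_inference_steps=50)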
| 197 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase_ : Dict = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swin"""] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_swin"""] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
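# Illustrative note (not in the original): with _LazyModule installed in
# sys.modules, importing the package is cheap; the torch/TF submodules are
# imported only on first attribute access.
#   from transformers.models import swin   # fast, nothing heavy imported yet
#   swin.SwinModel                          # now .modeling_swin is imported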
| 197 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase )
@torch.no_grad()
def __call__( self , UpperCamelCase = 1 , UpperCamelCase = None , UpperCamelCase = 0.0 , UpperCamelCase = 50 , UpperCamelCase = "pil" , UpperCamelCase = True , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_lowerCamelCase , )
lowerCamelCase_ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_lowerCamelCase )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
lowerCamelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
lowerCamelCase_ = self.scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
# predict the noise residual
lowerCamelCase_ = self.unet(_lowerCamelCase , _lowerCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample
# decode the image latents with the VAE
lowerCamelCase_ = self.vqvae.decode(_lowerCamelCase ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCamelCase )
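# Hypothetical usage sketch (not in the original; the checkpoint id is an
# assumption -- in diffusers the equivalent unconditional latent diffusion
# pipeline is LDMPipeline):
#   from diffusers import LDMPipeline
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]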
| 55 |
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
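# Worked example (not in the original): solve(13, 2) == 1, since
# 13 = 2**2 + 3**2 is the only way to write 13 as a sum of distinct squares.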
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """simple docstring"""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """simple docstring"""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method='''nm''')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """simple docstring"""
    regressor = SVR(kernel='''rbf''', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """simple docstring"""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """simple docstring"""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
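# Quick check (not in the original): data_safety_checker([4.95, 4.99, 9.0], 5.0)
# returns True (two votes within 0.1 of the actual value outvote the outlier),
# while data_safety_checker([2.0, 3.0, 4.0], 5.0) returns False.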
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
UpperCamelCase__ : str = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
UpperCamelCase__ : Tuple = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
UpperCamelCase__ : Tuple = Normalizer().fit_transform(data_input_df.values)
# split data
UpperCamelCase__ : List[Any] = normalize_df[:, 2].tolist()
UpperCamelCase__ : int = normalize_df[:, 0].tolist()
UpperCamelCase__ : Optional[Any] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
UpperCamelCase__ : Optional[int] = normalize_df[:, [1, 2]].tolist()
UpperCamelCase__ : List[Any] = x[: len(x) - 1]
UpperCamelCase__ : List[str] = x[len(x) - 1 :]
# for linear regression & sarimax
UpperCamelCase__ : Optional[int] = total_date[: len(total_date) - 1]
UpperCamelCase__ : int = total_user[: len(total_user) - 1]
UpperCamelCase__ : Any = total_match[: len(total_match) - 1]
UpperCamelCase__ : int = total_date[len(total_date) - 1 :]
UpperCamelCase__ : Optional[int] = total_user[len(total_user) - 1 :]
UpperCamelCase__ : Optional[Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
UpperCamelCase__ : List[Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
UpperCamelCase__ : Any = """""" if data_safety_checker(res_vote, tst_user) else """not """
print("""Today's data is {not_str}safe.""")
| 330 |
def apply_table(inp, table):
    """simple docstring"""
    res = ''''''
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """simple docstring"""
    return data[1:] + data[0]


def xor(a, b):
    """simple docstring"""
    res = ''''''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """simple docstring"""
    row = int('''0b''' + data[0] + data[-1], 2)
    col = int('''0b''' + data[1:3], 2)
    return bin(s[row][col])[2:]
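# Quick checks (not in the original):
# >>> left_shift("10000")
# '00001'
# >>> xor("0101", "0011")
# '0110'
# >>> apply_table("0123456789", [3, 5, 2, 7, 4, 10, 1, 9, 8, 6])  # 1-indexed pick
# '2416390875'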
def function(expansion, s0, s1, key, message):
    """simple docstring"""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = '''0''' * (2 - len(l)) + l  # noqa: E741
    r = '''0''' * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(temp, left)
    return temp + right
if __name__ == "__main__":
UpperCamelCase__ : int = input("""Enter 10 bit key: """)
UpperCamelCase__ : Union[str, Any] = input("""Enter 8 bit message: """)
UpperCamelCase__ : Dict = [6, 3, 7, 4, 8, 5, 10, 9]
UpperCamelCase__ : Union[str, Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
UpperCamelCase__ : Optional[int] = [2, 4, 3, 1]
UpperCamelCase__ : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7]
UpperCamelCase__ : str = [4, 1, 3, 5, 7, 2, 8, 6]
UpperCamelCase__ : List[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
UpperCamelCase__ : int = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
UpperCamelCase__ : Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
UpperCamelCase__ : Optional[Any] = apply_table(key, paa_table)
UpperCamelCase__ : str = temp[:5]
UpperCamelCase__ : List[Any] = temp[5:]
UpperCamelCase__ : Dict = left_shift(left)
UpperCamelCase__ : Any = left_shift(right)
UpperCamelCase__ : Optional[Any] = apply_table(left + right, pa_table)
UpperCamelCase__ : List[str] = left_shift(left)
UpperCamelCase__ : int = left_shift(right)
UpperCamelCase__ : List[str] = left_shift(left)
UpperCamelCase__ : Dict = left_shift(right)
UpperCamelCase__ : List[str] = apply_table(left + right, pa_table)
# encryption
UpperCamelCase__ : Tuple = apply_table(message, IP)
UpperCamelCase__ : Optional[Any] = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ : Optional[int] = temp[4:] + temp[:4]
UpperCamelCase__ : Any = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ : Tuple = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
UpperCamelCase__ : Union[str, Any] = apply_table(CT, IP)
UpperCamelCase__ : List[str] = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ : Optional[Any] = temp[4:] + temp[:4]
UpperCamelCase__ : Optional[int] = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ : Any = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
| 330 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 45 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
def A_ ( self : Optional[Any] ):
super().setUp()
SCREAMING_SNAKE_CASE__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self : int , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE__ = 'UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE__ = 'unwanted, running'
return input_text, output_text
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(UpperCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [7, 4, 5, 10, 8, 9] )
def A_ ( self : List[str] ):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
| 176 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
__A : int = tempfile.mkdtemp()
__A : Any = BlipImageProcessor()
__A : Optional[int] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
__A : Dict = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
__A : str = InstructBlipProcessor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase__( self , **__lowerCamelCase ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer
def UpperCamelCase__( self , **__lowerCamelCase ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor
def UpperCamelCase__( self , **__lowerCamelCase ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).qformer_tokenizer
def UpperCamelCase__( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__A : Dict = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__A : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__A : Optional[int] = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
__A : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
self.assertIsInstance(processor.qformer_tokenizer , __lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[str] = self.get_image_processor()
__A : Any = self.get_tokenizer()
__A : Any = self.get_qformer_tokenizer()
__A : Optional[int] = InstructBlipProcessor(
tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase )
__A : str = self.prepare_image_inputs()
__A : Any = image_processor(__lowerCamelCase , return_tensors='''np''' )
__A : Dict = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[Any] = self.get_image_processor()
__A : List[Any] = self.get_tokenizer()
__A : Union[str, Any] = self.get_qformer_tokenizer()
__A : Dict = InstructBlipProcessor(
tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase )
__A : Dict = '''lower newer'''
__A : Dict = processor(text=__lowerCamelCase )
__A : int = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
__A : Any = qformer_tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : List[str] = self.get_qformer_tokenizer()
__A : Union[str, Any] = InstructBlipProcessor(
tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase )
__A : List[str] = '''lower newer'''
__A : List[Any] = self.prepare_image_inputs()
__A : Dict = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = self.get_image_processor()
__A : str = self.get_tokenizer()
__A : str = self.get_qformer_tokenizer()
__A : str = InstructBlipProcessor(
tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase )
__A : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Optional[Any] = processor.batch_decode(__lowerCamelCase )
__A : int = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = self.get_image_processor()
__A : List[Any] = self.get_tokenizer()
__A : List[Any] = self.get_qformer_tokenizer()
__A : Any = InstructBlipProcessor(
tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase )
__A : Union[str, Any] = '''lower newer'''
__A : Any = self.prepare_image_inputs()
__A : Any = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 366 |
"""simple docstring"""
def __lowercase ( snake_case_ : int ) ->Optional[Any]:
'''simple docstring'''
stooge(snake_case_ ,0 ,len(snake_case_ ) - 1 )
return arr
def __lowercase ( snake_case_ : Optional[Any] ,snake_case_ : Union[str, Any] ,snake_case_ : Any ) ->Tuple:
'''simple docstring'''
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
__A , __A : Optional[int] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
__A : Any = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(snake_case_ ,snake_case_ ,(h - t) )
# Recursively sort last 2/3 elements
stooge(snake_case_ ,i + t ,(snake_case_) )
# Recursively sort first 2/3 elements
stooge(snake_case_ ,snake_case_ ,(h - t) )
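# Example (not in the original):
# >>> stooge_sort([18, 0, -7, -1, 2, 2])
# [-7, -1, 0, 2, 2, 18]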
if __name__ == "__main__":
a_ = input("""Enter numbers separated by a comma:\n""").strip()
a_ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 291 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , __lowercase : Dict=None , __lowercase : Optional[int]=None , __lowercase : Any=None , __lowercase : Optional[int]="replace" , __lowercase : List[Any]="<s>" , __lowercase : int="</s>" , __lowercase : List[str]="</s>" , __lowercase : str="<s>" , __lowercase : str="<unk>" , __lowercase : Dict="<pad>" , __lowercase : str="<mask>" , __lowercase : List[Any]=False , __lowercase : Tuple=True , **__lowercase : Optional[Any] , ):
"""simple docstring"""
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , **lowercase_ , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowercase_ ) != add_prefix_space:
snake_case_ = getattr(lowercase_ , pre_tok_state.pop("type" ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowercase_ )
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = "post_processor"
snake_case_ = getattr(self.backend_tokenizer , lowercase_ , lowercase_ )
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state["sep"] )
if "cls" in state:
snake_case_ = tuple(state["cls"] )
snake_case_ = False
if state.get("add_prefix_space" , lowercase_ ) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get("trim_offsets" , lowercase_ ) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowercase_ , state.pop("type" ) )
snake_case_ = component_class(**lowercase_ )
setattr(self.backend_tokenizer , lowercase_ , lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def snake_case__ ( self : Any ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case__ ( self : Tuple , __lowercase : Tuple ):
"""simple docstring"""
snake_case_ = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else value
snake_case_ = value
def snake_case__ ( self : Union[str, Any] , *__lowercase : Union[str, Any] , **__lowercase : List[Any] ):
"""simple docstring"""
snake_case_ = kwargs.get("is_split_into_words" , lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*lowercase_ , **lowercase_ )
def snake_case__ ( self : List[Any] , *__lowercase : int , **__lowercase : List[Any] ):
"""simple docstring"""
snake_case_ = kwargs.get("is_split_into_words" , lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._encode_plus(*lowercase_ , **lowercase_ )
def snake_case__ ( self : Union[str, Any] , __lowercase : str , __lowercase : Optional[str] = None ):
"""simple docstring"""
snake_case_ = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def snake_case__ ( self : List[str] , __lowercase : str , __lowercase : Any=None ):
"""simple docstring"""
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case__ ( self : Optional[Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self : str , __lowercase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowercase : Optional[int] = None , __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowercase : Optional[int] = None , __lowercase : Optional[bool] = None , ):
"""simple docstring"""
snake_case_ = super()._pad(
encoded_inputs=lowercase_ , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , )
# Load from model defaults
if return_attention_mask is None:
snake_case_ = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case_ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case_ = len(encoded_inputs["global_attention_mask"] ) != len(lowercase_ )
if needs_to_be_padded:
snake_case_ = len(lowercase_ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case_ = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case_ = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
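# Illustrative example (not in the original): because padding extends
# `global_attention_mask` with -1 (local attention) rather than 0, a mask of
# [0, 0, 1] right-padded to length 5 becomes [0, 0, 1, -1, -1].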
| 187 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
"""simple docstring"""
def __init__( self : Union[str, Any],lowercase_ : Any,lowercase_ : Union[str, Any]=1_3,lowercase_ : Tuple=3_0,lowercase_ : List[Any]=2,lowercase_ : Optional[int]=3,lowercase_ : Union[str, Any]=True,lowercase_ : Tuple=True,lowercase_ : Any=3_2,lowercase_ : List[str]=2,lowercase_ : Optional[int]=4,lowercase_ : Union[str, Any]=3_7,lowercase_ : Tuple="gelu",lowercase_ : str=0.1,lowercase_ : Tuple=0.1,lowercase_ : Union[str, Any]=1_0,lowercase_ : int=0.02,lowercase_ : List[Any]=3,lowercase_ : Any=None,)-> Dict:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A__ = (image_size // patch_size) ** 2
A__ = num_patches + 1
def snake_case__ ( self : int )-> List[str]:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : Tuple )-> List[Any]:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=lowercase_,initializer_range=self.initializer_range,)
def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Tuple )-> Optional[Any]:
'''simple docstring'''
A__ = TFViTModel(config=lowercase_ )
A__ = model(lowercase_,training=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
A__ = self.image_size // 2
A__ = pixel_values[:, :, :image_size, :image_size]
A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ )
A__ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, seq_length, self.hidden_size) )
def snake_case__ ( self : List[Any],lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : List[Any] )-> Dict:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = TFViTForImageClassification(lowercase_ )
A__ = model(lowercase_,labels=lowercase_,training=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
A__ = self.image_size // 2
A__ = pixel_values[:, :, :image_size, :image_size]
A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ = 1
A__ = TFViTForImageClassification(lowercase_ )
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def snake_case__ ( self : int )-> List[Any]:
'''simple docstring'''
A__ = TFViTModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,has_text_modality=lowercase_,hidden_size=3_7 )
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def snake_case__ ( self : Any )-> int:
'''simple docstring'''
pass
def snake_case__ ( self : str )-> Dict:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_,tf.keras.layers.Layer ) )
def snake_case__ ( self : int )-> List[str]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowercase_ )
A__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1],lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : Optional[Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(lowercase_ )
def _snake_case( ) -> str:
'''simple docstring'''
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : List[Any] )-> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def snake_case__ ( self : Any )-> Dict:
'''simple docstring'''
A__ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=lowercase_,return_tensors='tf' )
# forward pass
A__ = model(**lowercase_ )
# verify the logits
A__ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,lowercase_ )
A__ = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3],lowercase_,atol=1E-4 )
| 7 | 0 |
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int ) -> int:
_snake_case = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
_snake_case = n - k
# Calculate C(n,k)
for i in range(__lowerCamelCase ):
result *= n - i
result //= i + 1
return result
def _UpperCAmelCase ( __lowerCamelCase : int ) -> int:
return binomial_coefficient(2 * node_count , __lowerCamelCase ) // (node_count + 1)
def _UpperCAmelCase ( __lowerCamelCase : int ) -> int:
if n < 0:
raise ValueError('''factorial() not defined for negative values''' )
_snake_case = 1
for i in range(1 , n + 1 ):
result *= i
return result
def _UpperCAmelCase ( __lowerCamelCase : int ) -> int:
return catalan_number(__lowerCamelCase ) * factorial(__lowerCamelCase )
if __name__ == "__main__":
    node_count = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
F"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
F"binary trees and {catalan_number(node_count)} binary search trees."
)
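A quick sanity check of the functions above (hypothetical, not part of the original script): there are 5 shapes of binary trees on 3 unlabeled nodes, hence 5 * 3! = 30 binary trees on labeled nodes.

assert catalan_number(3) == 5        # C(6, 3) // 4 == 20 // 4
assert binary_tree_count(3) == 30    # 5 shapes * 3! labelings
assert factorial(5) == 120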
| 360 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
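A minimal standalone sketch (assumed, not from the tokenizer file) of how the two module-level helpers cooperate: every byte of the UTF-8 encoding is first mapped to a printable unicode character, and get_pairs then exposes the adjacent symbol pairs that the BPE loop ranks and merges.

byte_encoder = bytes_to_unicode()  # 256-entry dict: byte value -> printable unicode char
mapped = "".join(byte_encoder[b] for b in "hello".encode("utf-8"))
print(mapped)                    # reversible, control-token-free representation of the bytes
print(get_pairs(tuple(mapped)))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}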
| 40 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
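A small illustrative check of the hidden_size rule stated in the comment above: with the default embed_dim=64 and four stages, the channel dimension after the last stage is 64 * 2 ** 3 = 512.

embed_dim, depths = 64, [3, 4, 6, 5]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 512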
| 55 |
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
):
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """simple docstring"""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            'Invalid input\n'
            'needed_sum must be between 1 and 1000, power between 2 and 10.')
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
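Hypothetical doctest-style checks for the backtracking search above (values verified by hand): 100 = 10^2 = 6^2 + 8^2 = 1^2 + 3^2 + 4^2 + 5^2 + 7^2, giving three representations as sums of distinct squares.

assert solve(100, 2) == 3
assert solve(10, 3) == 0  # 10 is not a sum of distinct positive cubes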
| 289 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase :Any = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    '''simple docstring'''

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image: Union[str, "Image.Image"], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries')
        if isinstance(image, (str, Image.Image)):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs['image'])
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(',')
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop('target_size')
        candidate_label = model_inputs.pop('candidate_label')
        is_last = model_inputs.pop('is_last')
        outputs = self.model(**model_inputs)
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['target_size'])[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0])
                result = {'score': score, 'label': label, 'box': box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box):
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
| 367 |
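A hypothetical invocation of the pipeline above via the high-level transformers.pipeline factory (the OWL-ViT checkpoint is a commonly used zero-shot detector, not something this file pins down):

from transformers import pipeline

detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
# each entry: {"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}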
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 275 | 0 |
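Quick illustrative checks for the automorphic-number test above (not in the original file): 5^2 = 25, 6^2 = 36 and 76^2 = 5776 all end in the number itself, while 7^2 = 49 does not.

assert is_automorphic_number(5) and is_automorphic_number(6) and is_automorphic_number(76)
assert not is_automorphic_number(7)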
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__snake_case = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
__snake_case = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
__snake_case = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
    def _compute(self, predictions, references, sample_weight=None):
        '''simple docstring'''
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 97 |
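For reference, in the binary case the coefficient described above reduces to the standard confusion-matrix formula MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)); the counts below are made up for illustration.

import math

tp, tn, fp, fn = 3, 4, 1, 2  # hypothetical confusion-matrix counts
mcc = (tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
assert round(mcc, 3) == 0.408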
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
def A ( self : Any ) -> Any:
return 32
@property
def A ( self : Optional[int] ) -> Any:
return 32
@property
def A ( self : Dict ) -> int:
return self.time_input_dim
@property
def A ( self : Tuple ) -> str:
return self.time_input_dim * 4
@property
def A ( self : Any ) -> str:
return 1_00
@property
def A ( self : str ) -> List[str]:
torch.manual_seed(0 )
lowercase_ : List[Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        lowercase_ : Dict = UNet2DConditionModel(**A )
return model
@property
def A ( self : Optional[Any] ) -> Union[str, Any]:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def A ( self : List[Any] ) -> Dict:
torch.manual_seed(0 )
lowercase_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ : Tuple = self.dummy_unet
lowercase_ : int = self.dummy_movq
lowercase_ : List[Any] = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowercase_ : str = DDIMScheduler(**A )
lowercase_ : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A ( self : Optional[int] , A : int , A : List[str]=0 ) -> int:
lowercase_ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A ) ).to(A )
lowercase_ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A )
# create init_image
lowercase_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A ) ).to(A )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ : Optional[Any] = Image.fromarray(np.uint8(A ) ).convert('''RGB''' ).resize((2_56, 2_56) )
# create hint
lowercase_ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A ) ).to(A )
if str(A ).startswith('''mps''' ):
lowercase_ : Optional[Any] = torch.manual_seed(A )
else:
lowercase_ : List[Any] = torch.Generator(device=A ).manual_seed(A )
lowercase_ : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def A ( self : Any ) -> List[Any]:
lowercase_ : List[str] = '''cpu'''
lowercase_ : Any = self.get_dummy_components()
lowercase_ : Any = self.pipeline_class(**A )
lowercase_ : int = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowercase_ : Dict = pipe(**self.get_dummy_inputs(A ) )
lowercase_ : str = output.images
lowercase_ : int = pipe(
**self.get_dummy_inputs(A ) , return_dict=A , )[0]
lowercase_ : Dict = image[0, -3:, -3:, -1]
lowercase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ : List[str] = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : Tuple ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Any ) -> Optional[int]:
lowercase_ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
lowercase_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowercase_ : Optional[int] = init_image.resize((5_12, 5_12) )
lowercase_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
lowercase_ : Optional[int] = torch.from_numpy(np.array(A ) ).float() / 255.0
lowercase_ : Tuple = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowercase_ : Optional[Any] = '''A robot, 4k photo'''
        lowercase_ : Tuple = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(A )
        lowercase_ : Dict = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.float16 )
lowercase_ : int = pipeline.to(A )
pipeline.set_progress_bar_config(disable=A )
lowercase_ : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase_ , lowercase_ : int = pipe_prior(
A , image=A , strength=0.85 , generator=A , negative_prompt='''''' , ).to_tuple()
lowercase_ : str = pipeline(
image=A , image_embeds=A , negative_image_embeds=A , hint=A , generator=A , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type='''np''' , )
lowercase_ : Optional[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(A , A )
| 33 | 0 |
'''simple docstring'''
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
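A worked example (hypothetical) for the dynamic program above: with only right/down moves, the cheapest path through this grid is 1 -> 3 -> 1 -> 1 -> 1, costing 7.

assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7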
| 357 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("""digital_image_processing/image_data/lena_small.jpg""") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            """<PIL.Image.Image image mode=RGB size=100x100 at""")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("""digital_image_processing/image_data/lena_small.jpg""", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg", ):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = """digital_image_processing/image_data/lena.jpg"""
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center_pixel = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center_pixel)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
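A standalone observation (plain NumPy, assumed rather than taken from the test suite) about the diagonal Laplace kernel used in test_convolve_filter: its entries sum to zero (4 * 0.25 + 4 * 0.5 - 3 = 0), so a constant image patch yields a zero response.

import numpy as np

kernel = np.array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
patch = np.full((3, 3), 7.0)
assert np.isclose((kernel * patch).sum(), 0.0)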
| 129 | 0 |
'''simple docstring'''
demo_graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
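Consistency check (illustrative): on an unweighted graph, the number of edges on the BFS path equals the BFS distance.

path = bfs_shortest_path(demo_graph, "G", "D")
assert len(path) - 1 == bfs_shortest_path_distance(demo_graph, "G", "D") == 4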
| 198 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "roberta-prelayernorm"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 71 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """simple docstring"""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """simple docstring"""
    return round(float((moles * 0.0_821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """simple docstring"""
    return round(float((moles * 0.0_821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """simple docstring"""
    return round(float((pressure * volume) / (0.0_821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
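A worked example with the gas constant R = 0.0821 L·atm/(mol·K) (values hypothetical): 2 mol at 300 K in a 10 L vessel exert P = nRT / V = 2 * 0.0821 * 300 / 10 ≈ 4.93 atm, which the function rounds to 5.

assert moles_to_pressure(volume=10, moles=2, temperature=300) == 5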
| 71 | 1 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    '''simple docstring'''
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
    tense = '''will be''' if year > datetime.now().year else '''was'''
    print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 97 |
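Spot check for the routine above (assumed, matching the widely published date): Gauss's method yields April 9 for Easter 2023.

assert gauss_easter(2023) == datetime(2023, 4, 9)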
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Any = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']
    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
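A hypothetical invocation of the converter above (script name, paths, and config name are placeholders; the flags come from the argument parser):

# python convert_opt_checkpoint.py \
#     --fairseq_path /path/to/model.pt \
#     --pytorch_dump_folder_path ./opt-converted \
#     --hf_config facebook/opt-350m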
| 99 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case_ = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""")
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
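Illustrative arithmetic (assumed inputs) for the resize rule above: with the default crop_pct = 224/256 and a requested shortest edge of 224 (below 384), the image is first resized so its shortest edge is int(224 / (224/256)) = 256, then center-cropped to 224.

crop_pct = 224 / 256
assert int(224 / crop_pct) == 256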
| 216 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.')[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : int=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
lowercase__ : Dict = UniSpeechSatConfig()
lowercase__ : str = ''
if is_finetuned:
lowercase__ : Any = UniSpeechSatForCTC(SCREAMING_SNAKE_CASE_ )
else:
lowercase__ : Optional[Any] = UniSpeechSatForPreTraining(SCREAMING_SNAKE_CASE_ )
lowercase__ , lowercase__ , lowercase__ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : List[str] = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
snake_case_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 216 | 1 |
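# A minimal, self-contained sketch of the wildcard key remapping used by the
# conversion script above: mapped keys may contain "*", which stands for the
# layer index recovered from the fairseq name. The MAPPING entry below is
# illustrative, not the real UniSpeech-SAT table.
from typing import Optional

MAPPING = {"encoder.layers.*.self_attn.k_proj": "encoder.layer.*.attention.k_proj"}

def remap_key(name: str) -> Optional[str]:
    for src, dst in MAPPING.items():
        prefix = src.split("*")[0]
        if name.startswith(prefix):
            # Recover the layer index from the segment right after the prefix.
            layer_index = name[len(prefix):].split(".")[0]
            return dst.replace("*", layer_index)
    return None

print(remap_key("encoder.layers.3.self_attn.k_proj.weight"))
# -> encoder.layer.3.attention.k_proj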
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class _A ( lowerCAmelCase , lowerCAmelCase ):
snake_case__ : Tuple = 1
@register_to_config
def __init__( self , __lowerCAmelCase = 1000 , __lowerCAmelCase = None ):
"""simple docstring"""
self.set_timesteps(__lowerCAmelCase )
# standard deviation of the initial noise distribution
lowercase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
lowercase = 4
# running values
lowercase = []
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
lowercase = num_inference_steps
lowercase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
lowercase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
lowercase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
lowercase = torch.sin(steps * math.pi / 2 ) ** 2
lowercase = (1.0 - self.betas**2) ** 0.5
lowercase = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
lowercase = timesteps.to(__lowerCAmelCase )
lowercase = []
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True , ):
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
lowercase = (self.timesteps == timestep).nonzero().item()
lowercase = timestep_index + 1
lowercase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__lowerCAmelCase )
if len(self.ets ) == 1:
lowercase = self.ets[-1]
elif len(self.ets ) == 2:
lowercase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowercase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
lowercase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
lowercase = self._get_prev_sample(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return sample
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.alphas[timestep_index]
lowercase = self.betas[timestep_index]
lowercase = self.alphas[prev_timestep_index]
lowercase = self.betas[prev_timestep_index]
lowercase = (sample - sigma * ets) / max(__lowerCAmelCase , 1E-8 )
lowercase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
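# The multistep update above is a linear-multistep (Adams-Bashforth) scheme:
# once four model outputs are buffered, the 4th-order weights
# (55, -59, 37, -9) / 24 are applied. A standalone consistency check:
coeffs = [55 / 24, -59 / 24, 37 / 24, -9 / 24]
assert abs(sum(coeffs) - 1.0) < 1e-12  # the weights of each order sum to one
print(coeffs)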
| 197 | """simple docstring"""
from ..utils import DummyObject, requires_backends
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : str = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : int = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 197 | 1 |
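# A minimal sketch of the dummy-object pattern above, with a simplified
# requires_backends that only checks importability (the real helper also
# builds per-backend installation hints). The class name is a hypothetical
# stand-in for the generated dummies.
import importlib.util

def requires_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backends: {missing}")

class DummyOnnxPipeline:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])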
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class snake_case ( UpperCAmelCase ):
__magic_name__ = ''''''
__magic_name__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self : Optional[Any] , A : Optional[DatasetInfo] = None , A : Optional[str] = None , **A : str , ):
'''simple docstring'''
super().__init__(self , **A )
a : Union[str, Any] = repo_info
a : str = token
a : Union[str, Any] = None
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
if self.dir_cache is None:
a : Any = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
a : Dict = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(A ): {'name': str(A ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self : List[Any] , A : str , A : str = "rb" , **A : Optional[Any] , ):
'''simple docstring'''
if not isinstance(self.repo_info , A ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
a : List[Any] = hf_hub_url(self.repo_info.id , A , revision=self.repo_info.sha )
return fsspec.open(
A , mode=A , headers=get_authentication_headers_for_url(A , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase__ ( self : Tuple , A : Any , **A : str ):
'''simple docstring'''
self._get_dirs()
a : str = self._strip_protocol(A )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A )
def lowerCamelCase__ ( self : int , A : int , A : Optional[int]=False , **A : Tuple ):
'''simple docstring'''
self._get_dirs()
a : Tuple = PurePosixPath(path.strip('/' ) )
a : str = {}
for p, f in self.dir_cache.items():
a : Tuple = PurePosixPath(p.strip('/' ) )
a : int = p.parent
if root == path:
a : List[Any] = f
a : Optional[int] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 186 |
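# A small sketch of the directory-cache construction above: every parent of
# each flat file path becomes a "directory" entry. File names are
# illustrative only.
from pathlib import PurePosixPath

files = ["data/train/part-0.parquet", "README.md"]
dir_cache = {}
for rfilename in files:
    dir_cache[rfilename] = {"name": rfilename, "size": None, "type": "file"}
    for d in list(PurePosixPath(rfilename).parents)[:-1]:  # drop the "." root
        dir_cache[str(d)] = {"name": str(d), "size": None, "type": "directory"}
print(sorted(dir_cache))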
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 186 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def a__ ( _UpperCamelCase : list ,_UpperCamelCase : list ,_UpperCamelCase : list ,_UpperCamelCase : list ,_UpperCamelCase : list ):
__lowerCamelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCamelCase )] )
__lowerCamelCase = np.array(_UpperCamelCase )
__lowerCamelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() ,_UpperCamelCase ) ) ,x.transpose() ) ,_UpperCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def a__ ( _UpperCamelCase : list ,_UpperCamelCase : list ,_UpperCamelCase : list ):
__lowerCamelCase = (1, 2, 1)
__lowerCamelCase = (1, 1, 0, 7)
__lowerCamelCase = SARIMAX(
_UpperCamelCase ,exog=_UpperCamelCase ,order=_UpperCamelCase ,seasonal_order=_UpperCamelCase )
__lowerCamelCase = model.fit(disp=_UpperCamelCase ,maxiter=6_00 ,method='''nm''' )
__lowerCamelCase = model_fit.predict(1 ,len(_UpperCamelCase ) ,exog=[test_match] )
return result[0]
def a__ ( _UpperCamelCase : list ,_UpperCamelCase : list ,_UpperCamelCase : list ):
__lowerCamelCase = SVR(kernel='''rbf''' ,C=1 ,gamma=0.1 ,epsilon=0.1 )
regressor.fit(_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = regressor.predict(_UpperCamelCase )
return y_pred[0]
def a__ ( _UpperCamelCase : list ):
train_user.sort()
__lowerCamelCase = np.percentile(_UpperCamelCase ,25 )
__lowerCamelCase = np.percentile(_UpperCamelCase ,75 )
__lowerCamelCase = qa - qa
__lowerCamelCase = qa - (iqr * 0.1)
return low_lim
def a__ ( _UpperCamelCase : list ,_UpperCamelCase : float ):
__lowerCamelCase = 0
__lowerCamelCase = 0
for i in list_vote:
if i > actual_result:
__lowerCamelCase = not_safe + 1
else:
if abs(abs(_UpperCamelCase ) - abs(_UpperCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
a_ = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
a_ = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
a_ = Normalizer().fit_transform(data_input_df.values)
# split data
a_ = normalize_df[:, 2].tolist()
a_ = normalize_df[:, 0].tolist()
a_ = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
a_ = normalize_df[:, [1, 2]].tolist()
a_ = x[: len(x) - 1]
a_ = x[len(x) - 1 :]
# for linear regression & sarimax
a_ = total_date[: len(total_date) - 1]
a_ = total_user[: len(total_user) - 1]
a_ = total_match[: len(total_match) - 1]
a_ = total_date[len(total_date) - 1 :]
a_ = total_user[len(total_user) - 1 :]
a_ = total_match[len(total_match) - 1 :]
# voting system with forecasting
a_ = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
a_ = """""" if data_safety_checker(res_vote, tst_user) else """not """
print("""Today's data is {not_str}safe.""")
| 330 |
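# The closed-form least-squares step used by linear_regression_prediction
# above, written out directly: beta = (X^T X)^{-1} X^T y. Toy numbers only;
# np.linalg.lstsq is the numerically safer choice in practice.
import numpy as np

X = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])  # bias column + one feature
y = np.array([1.0, 2.0, 3.0])
beta = np.linalg.inv(X.T @ X) @ X.T @ y
print(beta)  # approximately [0., 1.]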
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
a_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def a__ ( _UpperCamelCase : int ):
for pegasus_name, hf_name in PATTERNS:
__lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase )
return k
def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ):
__lowerCamelCase = DEFAULTS.copy()
cfg_kwargs.update(_UpperCamelCase )
__lowerCamelCase = PegasusConfig(**_UpperCamelCase )
__lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase )
__lowerCamelCase = torch_model.model.state_dict()
__lowerCamelCase = {}
for k, v in tf_weights.items():
__lowerCamelCase = rename_state_dict_key(_UpperCamelCase )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
__lowerCamelCase = v.T
__lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
__lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
__lowerCamelCase = mapping['''shared.weight''']
__lowerCamelCase = mapping['''shared.weight''']
__lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase )
__lowerCamelCase = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ):
__lowerCamelCase = tf.train.list_variables(_UpperCamelCase )
__lowerCamelCase = {}
__lowerCamelCase = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ):
__lowerCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
__lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = array
return tf_weights
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ):
# save tokenizer first
__lowerCamelCase = Path(_UpperCamelCase ).parent.name
__lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
__lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' ,model_max_length=_UpperCamelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_UpperCamelCase )
# convert model
__lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase )
__lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
__lowerCamelCase = task_specific_params
__lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase )
torch_model.save_pretrained(_UpperCamelCase )
__lowerCamelCase = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
a_ = parser.parse_args()
if args.save_dir is None:
a_ = Path(args.tf_ckpt_path).parent.name
a_ = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 330 | 1 |
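# A condensed sketch of the pattern-based key renaming performed by the
# Pegasus converter above; the pattern table here is a truncated,
# illustrative subset of the real one.
PATTERNS = [["memory_attention", "encoder_attn"], ["/", "."], ["kernel", "weight"]]

def rename_state_dict_key(k: str) -> str:
    for tf_name, hf_name in PATTERNS:
        k = k.replace(tf_name, hf_name)
    return k

print(rename_state_dict_key("decoder/memory_attention/kernel"))
# -> decoder.encoder_attn.weight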
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = """T5Config"""
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''mt5'''
SCREAMING_SNAKE_CASE_ =MTaConfig
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''mt5'''
SCREAMING_SNAKE_CASE_ =MTaConfig
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''mt5'''
SCREAMING_SNAKE_CASE_ =MTaConfig
| 298 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCAmelCase : Union[str, Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0}
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : int = min_resolution
UpperCAmelCase__ : Tuple = max_resolution
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : str = do_convert_rgb
UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def __a ( self : str ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = PixaStructImageProcessingTester(self )
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase__ : Dict = 2_0_4_8
UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase__ : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(snake_case__ ):
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
UpperCAmelCase__ : Optional[Any] = "Hello"
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Dict ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Dict = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Optional[int] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase__ : Optional[int] = 3
@property
def __a ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : int ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 298 | 1 |
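# The flattened-patch width asserted throughout the Pix2Struct tests above:
# each patch unrolls to patch_h * patch_w * channels values, plus two leading
# positional ids (the patch's row and column).
patch_h, patch_w, channels = 16, 16, 3
expected_hidden_dim = patch_h * patch_w * channels + 2
print(expected_hidden_dim)  # 770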
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a__ = logging.get_logger(__name__)
class snake_case ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Dict , *lowerCAmelCase : Dict , **lowerCAmelCase : str) -> str:
"""simple docstring"""
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , _a , )
super().__init__(*_a , **_a)
| 317 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
def a__ ( snake_case__ , snake_case__ ) -> Tuple:
try:
with open(snake_case__ , """rb""" ) as flax_state_f:
lowerCamelCase = from_bytes(snake_case__ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(snake_case__ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )
def a__ ( snake_case__ , snake_case__ ) -> Tuple:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
lowerCamelCase = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values()
if any(snake_case__ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
lowerCamelCase = jax.tree_util.tree_map(
lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ )
lowerCamelCase = """"""
lowerCamelCase = flatten_dict(snake_case__ , sep=""".""" )
lowerCamelCase = pt_model.state_dict()
# keep track of unexpected & missing keys
lowerCamelCase = []
lowerCamelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCamelCase = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCamelCase = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(snake_case__ ):
lowerCamelCase = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
lowerCamelCase = """.""".join(snake_case__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
lowerCamelCase = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor
lowerCamelCase = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
lowerCamelCase = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(snake_case__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
""" use it for predictions and inference.""" )
return pt_model
| 291 | 0 |
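# The two kernel-layout rules applied by the Flax-to-PyTorch loader above,
# in isolation: 4-D conv kernels go from Flax's (H, W, in, out) to PyTorch's
# (out, in, H, W); 2-D dense kernels are simply transposed.
import numpy as np

conv_kernel = np.zeros((3, 3, 8, 16))  # Flax layout
dense_kernel = np.zeros((8, 16))
print(np.transpose(conv_kernel, (3, 2, 0, 1)).shape)  # (16, 8, 3, 3)
print(dense_kernel.T.shape)  # (16, 8)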
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = PriorTransformer
__lowerCAmelCase = '''hidden_states'''
@property
def _lowerCamelCase ( self ):
__a : Union[str, Any] = 4
__a : List[str] = 8
__a : List[str] = 7
__a : List[str] = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
__a : str = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
__a : Union[str, Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCamelCase ( self , _UpperCAmelCase=0 ):
torch.manual_seed(_UpperCAmelCase )
__a : Any = 4
__a : Dict = 8
__a : Tuple = 7
__a : List[str] = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
__a : Any = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
__a : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _lowerCamelCase ( self ):
return (4, 8)
@property
def _lowerCamelCase ( self ):
return (4, 8)
def _lowerCamelCase ( self ):
__a : Optional[Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 4,
'''num_layers''': 2,
'''embedding_dim''': 8,
'''num_embeddings''': 7,
'''additional_embeddings''': 4,
}
__a : Any = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
__a , __a : str = PriorTransformer.from_pretrained(
'''hf-internal-testing/prior-dummy''' , output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_UpperCAmelCase )
__a : Dict = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _lowerCamelCase ( self ):
__a , __a : Dict = self.prepare_init_args_and_inputs_for_common()
__a : int = self.model_class(**_UpperCAmelCase )
__a : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : List[Any] = [*signature.parameters.keys()]
__a : str = ['''hidden_states''', '''timestep''']
self.assertListEqual(arg_names[:2] , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : str = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
__a : Union[str, Any] = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase , '''set_default_attn_processor''' ):
model.set_default_attn_processor()
__a : List[str] = self.get_dummy_seed_input()
with torch.no_grad():
__a : Optional[Any] = model(**_UpperCAmelCase )[0]
__a : str = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__a : int = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-2 ) )
@slow
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self , _UpperCAmelCase=1 , _UpperCAmelCase=768 , _UpperCAmelCase=77 , _UpperCAmelCase=0 ):
torch.manual_seed(_UpperCAmelCase )
__a : Tuple = batch_size
__a : Optional[int] = embedding_dim
__a : int = num_embeddings
__a : str = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
__a : Tuple = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
__a : Optional[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[37, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : str = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' )
model.to(_UpperCAmelCase )
__a : str = self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
__a : Any = model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
__a : Optional[int] = sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
__a : Any = torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 )
| 188 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _lowerCamelCase ( self , _UpperCAmelCase=0 ):
__a : Tuple = floats_tensor((1, 3, 128, 128) , rng=random.Random(_UpperCAmelCase ) )
__a : Any = np.random.RandomState(_UpperCAmelCase )
__a : Any = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Dict = self.get_dummy_inputs()
__a : Any = pipe(**_UpperCAmelCase ).images
__a : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__a : List[Any] = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Optional[int] = self.get_dummy_inputs()
__a : Optional[Any] = pipe(**_UpperCAmelCase ).images
__a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[int] = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# warmup pass to apply optimizations
__a : Any = pipe(**self.get_dummy_inputs() )
__a : List[str] = self.get_dummy_inputs()
__a : Tuple = pipe(**_UpperCAmelCase ).images
__a : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : int = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : List[Any] = self.get_dummy_inputs()
__a : Any = pipe(**_UpperCAmelCase ).images
__a : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Union[str, Any] = self.get_dummy_inputs()
__a : str = pipe(**_UpperCAmelCase ).images
__a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[int] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Optional[int] = self.get_dummy_inputs()
__a : Optional[Any] = pipe(**_UpperCAmelCase ).images
__a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[Any] = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self ):
__a : Optional[Any] = ort.SessionOptions()
__a : Any = False
return options
def _lowerCamelCase ( self ):
__a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__a : Tuple = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__a : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Tuple = '''A fantasy landscape, trending on artstation'''
__a : Tuple = np.random.RandomState(0 )
__a : int = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=_UpperCAmelCase , output_type='''np''' , )
__a : List[Any] = output.images
__a : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__a : Any = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowerCamelCase ( self ):
__a : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__a : Tuple = init_image.resize((768, 512) )
__a : str = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__a : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : List[str] = '''A fantasy landscape, trending on artstation'''
__a : str = np.random.RandomState(0 )
__a : str = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=_UpperCAmelCase , output_type='''np''' , )
__a : Dict = output.images
__a : List[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__a : Dict = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 188 | 1 |
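# The scheduler swapping exercised by the ONNX tests above, reduced to its
# core: any diffusers scheduler can be rebuilt from another's config, since
# unused config keys are ignored. A sketch assuming diffusers is installed;
# no checkpoint download is needed.
from diffusers import DDPMScheduler, LMSDiscreteScheduler

base = DDPMScheduler()
swapped = LMSDiscreteScheduler.from_config(base.config)
print(type(swapped).__name__)  # LMSDiscreteScheduler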
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Any = logging.get_logger(__name__)
snake_case__ : Dict = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class snake_case_( _a ):
__UpperCamelCase = """encodec"""
def __init__( self : Any , UpperCamelCase_ : List[Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , UpperCamelCase_ : str=2_4_0_0_0 , UpperCamelCase_ : List[Any]=1 , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Optional[Any]=1_2_8 , UpperCamelCase_ : Any=3_2 , UpperCamelCase_ : str=1 , UpperCamelCase_ : str=[8, 5, 4, 2] , UpperCamelCase_ : Union[str, Any]="weight_norm" , UpperCamelCase_ : Tuple=7 , UpperCamelCase_ : Tuple=7 , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=True , UpperCamelCase_ : str="reflect" , UpperCamelCase_ : Any=2 , UpperCamelCase_ : int=2 , UpperCamelCase_ : str=1.0 , UpperCamelCase_ : str=1_0_2_4 , UpperCamelCase_ : Any=None , UpperCamelCase_ : Union[str, Any]=True , **UpperCamelCase_ : Dict , ):
lowerCAmelCase : Any = target_bandwidths
lowerCAmelCase : str = sampling_rate
lowerCAmelCase : Union[str, Any] = audio_channels
lowerCAmelCase : Optional[int] = normalize
lowerCAmelCase : Optional[Any] = chunk_length_s
lowerCAmelCase : str = overlap
lowerCAmelCase : List[str] = hidden_size
lowerCAmelCase : List[str] = num_filters
lowerCAmelCase : Union[str, Any] = num_residual_layers
lowerCAmelCase : Tuple = upsampling_ratios
lowerCAmelCase : Tuple = norm_type
lowerCAmelCase : List[str] = kernel_size
lowerCAmelCase : Tuple = last_kernel_size
lowerCAmelCase : Union[str, Any] = residual_kernel_size
lowerCAmelCase : Union[str, Any] = dilation_growth_rate
lowerCAmelCase : Any = use_causal_conv
lowerCAmelCase : str = pad_mode
lowerCAmelCase : Dict = compress
lowerCAmelCase : Union[str, Any] = num_lstm_layers
lowerCAmelCase : List[Any] = trim_right_ratio
lowerCAmelCase : str = codebook_size
lowerCAmelCase : Optional[int] = codebook_dim if codebook_dim is not None else hidden_size
lowerCAmelCase : int = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
super().__init__(**__UpperCAmelCase )
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[int] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowerCamelCase__ ( self : List[str] ):
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
| 60 |
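# The frame-rate arithmetic from the EnCodec config above, with the 24 kHz
# defaults: the hop length is the product of the upsampling ratios.
import math
import numpy as np

upsampling_ratios, sampling_rate = [8, 5, 4, 2], 24_000
hop_length = int(np.prod(upsampling_ratios))  # 320
print(math.ceil(sampling_rate / hop_length))  # 75 frames per second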
"""simple docstring"""
def lowercase ( A_ )-> str:
'''simple docstring'''
if isinstance(A_ , float ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(A_ , str ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
a : Optional[Any] = False
if num < 0:
a : Tuple = True
a : str = -num
a : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(A_ ) for e in binary )
return "0b" + "".join(str(A_ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
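# A quick cross-check of the conversion above against Python's built-in
# bin(), including the sign handling:
for n in (0, 13, -13):
    assert bin(n) == ("-0b" + format(-n, "b") if n < 0 else "0b" + format(n, "b"))
print("ok")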
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
UpperCAmelCase = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def _snake_case ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
"""simple docstring"""
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
continue
item.add_marker(pytest.mark.unit )
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict ) -> str:
"""simple docstring"""
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
lowerCAmelCase = tmp_path_factory.getbasetemp() / """cache"""
lowerCAmelCase = test_hf_cache_home / """datasets"""
lowerCAmelCase = test_hf_cache_home / """metrics"""
lowerCAmelCase = test_hf_cache_home / """modules"""
monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(_SCREAMING_SNAKE_CASE ) )
monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(_SCREAMING_SNAKE_CASE ) )
monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = test_hf_datasets_cache / """downloads"""
monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE ) )
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope="""session""" )
def _snake_case ( ) -> Optional[Any]:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
"""simple docstring"""
# don't take tests into account when counting downloads
monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , _SCREAMING_SNAKE_CASE )
@pytest.fixture
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , _SCREAMING_SNAKE_CASE ) | 187 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if the child node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main() | 187 | 1 |
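# A short usage sketch for the TrieNode class above, using only methods defined there.
root = TrieNode()
root.insert_many(["band", "bandana", "banana"])
assert root.find("banana")
assert not root.find("ban")  # a prefix of stored words, but not itself stored
root.delete("band")
assert not root.find("band")
assert root.find("bandana")  # words sharing the deleted prefix survive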
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings.

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 51 |
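# Worked check for jaro_winkler above on the classic pair ("martha", "marhta"):
# 6 matched characters and 1 transposition give jaro = (6/6 + 6/6 + 5/6) / 3 = 17/18,
# and the shared 3-character prefix "mar" lifts it to 17/18 + 0.1 * 3 * (1/18) ≈ 0.9611.
assert abs(jaro_winkler("martha", "marhta") - 0.9611111111111111) < 1e-9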
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275 | 0 |
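# Quick sketch for binary_or above: 25 = 0b11001 and 32 = 0b100000; OR-ing the
# zero-padded digit columns gives 0b111001 (= 57), matching Python's | operator.
assert binary_or(25, 32) == "0b111001"
assert binary_or(25, 32) == bin(25 | 32)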
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
| 356 |
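# A minimal init sketch for the module above (hedged: assumes it is exported as
# FlaxControlNetModel, as in diffusers, and uses deliberately tiny shapes; the
# defaults shown in the class are far larger).
import jax

controlnet = FlaxControlNetModel(
    sample_size=16,
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    block_out_channels=(32, 64),
    layers_per_block=1,
    cross_attention_dim=32,
    attention_head_dim=8,
)
params = controlnet.init_weights(rng=jax.random.PRNGKey(0))  # returns the parameter PyTree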
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
| 125 | 0 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
    def _sanitize_parameters(self, return_tensors=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.')
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input')
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f''' `args[0]`: {args[0]} have the wrong format. It should be either of type `str` or type `list`''')
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs['input_ids']).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get('min_length', self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get('max_length', self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs['min_length'], generate_kwargs['max_length'])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f'''{self.return_name}_token_ids''': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f'''{self.return_name}_text''': self.tokenizer.decode(
                        output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f'''Your min_length={min_length} must be smaller than your max_length={max_length}.''')
        if input_length < max_length:
            logger.warning(
                f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
                'a summarization task, where outputs shorter than the input are typically wanted, you might '
                f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''')
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
                'increasing your max_length manually, e.g. translator(\'...\', max_length=400)')
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, '_build_translation_inputs', None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get('task', self.task)
            items = task.split('_')
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 213 |
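# Usage sketch for the pipelines above through the `pipeline` factory (task names
# as registered in transformers; default checkpoints download on first use).
from transformers import pipeline

summarizer = pipeline("summarization")
translator = pipeline("translation_en_to_fr")
print(summarizer("Long article text ...", max_length=50)[0]["summary_text"])
print(translator("How are you?")[0]["translation_text"])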
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_cpmant'] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 129 | 0 |
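# The _LazyModule pattern above keeps `import transformers.models.cpmant` cheap:
# heavy torch modules load only when an attribute is first touched. A rough
# standalone sketch of the idea (simplified; the real _LazyModule handles more cases):
import importlib
import types


class LazySketchModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # attribute name -> submodule that actually defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)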
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid 1 / (1 + e^-x), applied element-wise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (swish) activation x * sigmoid(x), applied element-wise."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
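# Numeric spot-check for the activations above: sigmoid(0) = 0.5, so SiLU(0) = 0.
x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))              # ~[0.2689, 0.5, 0.7311]
print(sigmoid_linear_unit(x))  # ~[-0.2689, 0.0, 0.7311]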
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 80 | 1 |
def abbr(a: str, b: str) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 |
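# Worked example for abbr above: in "daBcd" the lowercase 'a' and 'c' can be
# capitalized and the remaining 'd's deleted to yield "ABC"; "dBcd" cannot
# produce the leading 'A'.
assert abbr("daBcd", "ABC")
assert not abbr("dBcd", "ABC")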
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head for transformer encoders."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
| 71 | 1 |
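# A tiny usage sketch for ClassificationHead above (shapes are illustrative).
import torch

head = ClassificationHead(class_size=5, embed_size=16)
hidden = torch.randn(8, 16)  # batch of 8 pooled transformer states
logits = head(hidden)        # -> shape (8, 5)
assert logits.shape == (8, 5)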
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries')
        if isinstance(image, (str, Image.Image)):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs['top_k']
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs['image'])
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(',')
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop('target_size')
        candidate_label = model_inputs.pop('candidate_label')
        is_last = model_inputs.pop('is_last')
        outputs = self.model(**model_inputs)
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['target_size'])[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0])
                result = {'score': score, 'label': label, 'box': box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
| 39 |
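# Usage sketch through the pipeline factory (hedged: "google/owlvit-base-patch32"
# is the usual checkpoint for this task; any compatible model works).
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])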
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a__: Union[str, Any] = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 39 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 216 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 216 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [F"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 365 |
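# Minimal sketch: instantiate DinatConfig and read the fields derived in __init__.
config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
print(config.num_layers)   # 4 (len(depths))
print(config.hidden_size)  # 64 * 2**(4 - 1) = 512, the channel dim after the last stage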
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    0/1 knapsack: best value achievable with items from `index` onward,
    given `max_weight` remaining capacity.
    """
    if index == number_of_items:
        return 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 193 | 0 |
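# Worked example for knapsack above: capacity 8 with items (weight, value) =
# (3, 30), (4, 50), (5, 60). Taking the 3- and 5-weight items fills the bag
# exactly and yields the best value, 90.
weights = [3, 4, 5]
values = [30, 50, 60]
assert knapsack(weights, values, len(weights), 8, 0) == 90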
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear')
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', skip_prk_steps=True)
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-base', safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
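        # enable_sequential_cpu_offload keeps only the submodule that is currently executing on
        # the GPU, which is why peak allocation stays under ~5.5 GB at the cost of slower inference.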
| 186 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
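# `_import_structure` maps each submodule to the public names it exports; `_LazyModule`
# (wired up at the bottom of this file) defers the actual imports until an attribute is
# first accessed, which keeps `import transformers` fast.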
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 186 | 1 |
from __future__ import annotations
# Adjacency list of a small undirected example graph.
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
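# Expected output of the demo above:
#   G->C->A->B->D
#   G
#   ValueError: No path from vertex: G to vertex: Foo  (raised by the last call)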
| 354 | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 284 | 0 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 298 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            # NOTE: assumes the HF adapter modules mirror fairseq's fc1/fc2 naming.
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
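# Example invocation (paths illustrative):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#       --pytorch_dump_folder_path ./converted-xmod-base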
| 298 | 1 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        data_dir,
        tokenizer,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append(dict(pred=pred, id=ids[i].item()))
        save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one file, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
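# Example launch on 2 GPUs (arguments illustrative):
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --save_dir tmp_gen --data_dir xsum --bs 16 --fp16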
| 352 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : List[str] ) -> Optional[Any]:
_lowerCamelCase = len(lowercase_ )
while cur > 1:
# Find the maximum number in arr
_lowerCamelCase = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
_lowerCamelCase = arr[mi::-1] + arr[mi + 1 : len(lowercase_ )]
# Reverse whole list
_lowerCamelCase = arr[cur - 1 :: -1] + arr[cur : len(lowercase_ )]
cur -= 1
return arr
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = input('''Enter numbers separated by a comma:\n''').strip()
__SCREAMING_SNAKE_CASE : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
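# Example: pancake_sort([3, 1, 5, 2]) returns [1, 2, 3, 5]. Each pass flips the largest
# unsorted element to the front, then flips it into place, so the algorithm costs
# O(n^2) comparisons and performs at most 2n - 3 flips.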
| 73 | 0 |
def is_palindrome_number(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome_number(121)
    True
    >>> is_palindrome_number(123)
    False
    >>> is_palindrome_number(-101)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 188 |
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : Tuple ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : List[str] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Dict , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : str ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[int] , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
| 188 | 1 |
"""simple docstring"""
import numpy as np
def a__ ( SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def a__ ( SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
return vector * sigmoid(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
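# Example values (rounded): sigmoid(np.array([0.0])) -> [0.5];
# swish(np.array([-1.0, 1.0, 2.0])) -> [-0.2689, 0.7311, 1.7616]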
| 353 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , **snake_case__ , ):
"""simple docstring"""
super().__init__(features=snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ , **snake_case__ )
lowerCAmelCase : Dict = Sql(
cache_dir=snake_case__ , features=snake_case__ , sql=snake_case__ , con=snake_case__ , **snake_case__ , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : str = None
lowerCAmelCase : int = None
self.builder.download_and_prepare(
download_config=snake_case__ , download_mode=snake_case__ , verification_mode=snake_case__ , base_path=snake_case__ , )
# Build dataset for splits
lowerCAmelCase : str = self.builder.as_dataset(
split="train" , verification_mode=snake_case__ , in_memory=self.keep_in_memory )
return dataset
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
lowerCAmelCase : Tuple = dataset
lowerCAmelCase : Tuple = name
lowerCAmelCase : List[str] = con
lowerCAmelCase : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowerCAmelCase : Optional[int] = num_proc
lowerCAmelCase : Optional[int] = to_sql_kwargs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.to_sql_kwargs.pop("sql" , snake_case__ )
lowerCAmelCase : List[Any] = self.to_sql_kwargs.pop("con" , snake_case__ )
lowerCAmelCase : Dict = self.to_sql_kwargs.pop("index" , snake_case__ )
lowerCAmelCase : Dict = self._write(index=snake_case__ , **self.to_sql_kwargs )
return written
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = args
lowerCAmelCase : Optional[int] = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
lowerCAmelCase : List[Any] = query_table(
table=self.dataset.data , key=slice(snake_case__ , offset + self.batch_size ) , indices=self.dataset._indices , )
lowerCAmelCase : Tuple = batch.to_pandas()
lowerCAmelCase : str = df.to_sql(self.name , self.con , index=snake_case__ , **snake_case__ )
return num_rows or len(snake_case__ )
def lowercase__ ( self , snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
lowerCAmelCase , lowerCAmelCase : Dict = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , snake_case__ , snake_case__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
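# Typical round trip (illustrative; these classes back `Dataset.from_sql` / `Dataset.to_sql`):
#   import sqlite3
#   con = sqlite3.connect(":memory:")
#   SqlDatasetWriter(my_dataset, "my_table", con).write()
#   ds = SqlDatasetReader("SELECT * FROM my_table", con).read()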
| 133 | 0 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = int(_A )
snake_case_ , snake_case_ , snake_case_ = t // 3600, (t // 60) % 60, t % 60
return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def lowerCamelCase__ ( _A , _A , _A , _A , _A=300 ):
'''simple docstring'''
return f"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n "
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = "<table border=\"1\" class=\"dataframe\">\n"
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
snake_case_ = f"{elt:.6f}" if isinstance(_A , _A ) else str(_A )
html_code += f" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class UpperCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = 5
lowerCAmelCase_ = 0.2
def __init__( self : str , __lowercase : int , __lowercase : Optional[str] = None , __lowercase : bool = True , __lowercase : Optional["NotebookTrainingTracker"] = None , __lowercase : int = 3_00 , ):
"""simple docstring"""
snake_case_ = total
snake_case_ = "" if prefix is None else prefix
snake_case_ = leave
snake_case_ = parent
snake_case_ = width
snake_case_ = None
snake_case_ = None
snake_case_ = None
def snake_case__ ( self : str , __lowercase : int , __lowercase : bool = False , __lowercase : str = None ):
"""simple docstring"""
snake_case_ = value
if comment is not None:
snake_case_ = comment
if self.last_value is None:
snake_case_ = snake_case_ = time.time()
snake_case_ = snake_case_ = value
snake_case_ = snake_case_ = None
snake_case_ = self.warmup
snake_case_ = 1
self.update_bar(__lowercase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
snake_case_ = time.time()
snake_case_ = current_time - self.start_time
        # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
snake_case_ = self.elapsed_time / (value - self.start_value)
else:
snake_case_ = None
if value >= self.total:
snake_case_ = self.total
snake_case_ = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
snake_case_ = self.average_time_per_item * (self.total - value)
self.update_bar(__lowercase )
snake_case_ = value
snake_case_ = current_time
if self.average_time_per_item is None:
snake_case_ = 1
else:
snake_case_ = max(int(self.update_every / self.average_time_per_item ) , 1 )
def snake_case__ ( self : int , __lowercase : Union[str, Any] , __lowercase : List[str]=None ):
"""simple docstring"""
snake_case_ = " " * (len(str(self.total ) ) - len(str(__lowercase ) )) + str(__lowercase )
if self.elapsed_time is None:
snake_case_ = f"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
snake_case_ = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"
else:
snake_case_ = (
f"[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"
f" {format_time(self.predicted_remaining )}"
)
self.label += f", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f", {self.comment}]"
self.display()
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
snake_case_ = disp.display(disp.HTML(self.html_code ) , display_id=__lowercase )
else:
self.output.update(disp.HTML(self.html_code ) )
def snake_case__ ( self : Dict ):
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("" ) )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : str , __lowercase : int , __lowercase : Any=None ):
"""simple docstring"""
super().__init__(__lowercase )
snake_case_ = None if column_names is None else [column_names]
snake_case_ = None
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
snake_case_ = disp.display(disp.HTML(self.html_code ) , display_id=__lowercase )
else:
self.output.update(disp.HTML(self.html_code ) )
def snake_case__ ( self : Optional[Any] , __lowercase : Optional[int] ):
"""simple docstring"""
if self.inner_table is None:
snake_case_ = [list(values.keys() ), list(values.values() )]
else:
snake_case_ = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__lowercase )
snake_case_ = columns
self.inner_table.append([values[c] for c in columns] )
def snake_case__ ( self : str , __lowercase : Optional[int] , __lowercase : int=None , __lowercase : Any=3_00 ):
"""simple docstring"""
snake_case_ = NotebookProgressBar(__lowercase , prefix=__lowercase , parent=self , width=__lowercase )
return self.child_bar
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = None
self.display()
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : str ):
"""simple docstring"""
snake_case_ = None
snake_case_ = None
snake_case_ = False
def snake_case__ ( self : int , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Dict , **__lowercase : Union[str, Any] ):
"""simple docstring"""
snake_case_ = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
snake_case_ = 0
snake_case_ = 0
snake_case_ = [self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss" )
snake_case_ = NotebookTrainingTracker(state.max_steps , __lowercase )
def snake_case__ ( self : int , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : Union[str, Any] , **__lowercase : Optional[Any] ):
"""simple docstring"""
snake_case_ = int(state.epoch ) if int(state.epoch ) == state.epoch else f"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1 , comment=f"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , )
snake_case_ = False
def snake_case__ ( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : str , __lowercase : Union[str, Any] , __lowercase : Any=None , **__lowercase : List[str] ):
"""simple docstring"""
if not has_length(__lowercase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
snake_case_ = self.training_tracker.add_child(len(__lowercase ) )
else:
snake_case_ = NotebookProgressBar(len(__lowercase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def snake_case__ ( self : Union[str, Any] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : List[Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
snake_case_ = None
def snake_case__ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : Union[str, Any] , __lowercase : Any , __lowercase : List[Any]=None , **__lowercase : Optional[int] ):
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
snake_case_ = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
snake_case_ = state.global_step
self.training_tracker.write_line(__lowercase )
def snake_case__ ( self : Tuple , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Tuple=None , **__lowercase : str ):
"""simple docstring"""
if self.training_tracker is not None:
snake_case_ = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history ):
if "loss" in log:
snake_case_ = log["loss"]
break
if self.first_column == "Epoch":
snake_case_ = int(state.epoch )
else:
snake_case_ = state.global_step
snake_case_ = "eval"
for k in metrics:
if k.endswith("_loss" ):
snake_case_ = re.sub(r"\_loss$" , "" , __lowercase )
snake_case_ = metrics.pop("total_flos" , __lowercase )
snake_case_ = metrics.pop("epoch" , __lowercase )
snake_case_ = metrics.pop(f"{metric_key_prefix}_runtime" , __lowercase )
snake_case_ = metrics.pop(f"{metric_key_prefix}_samples_per_second" , __lowercase )
snake_case_ = metrics.pop(f"{metric_key_prefix}_steps_per_second" , __lowercase )
snake_case_ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time" , __lowercase )
for k, v in metrics.items():
if k == f"{metric_key_prefix}_loss":
snake_case_ = v
else:
snake_case_ = k.split("_" )
snake_case_ = " ".join([part.capitalize() for part in splits[1:]] )
snake_case_ = v
self.training_tracker.write_line(__lowercase )
self.training_tracker.remove_child()
snake_case_ = None
# Evaluation takes a long time so we should force the next update.
snake_case_ = True
def snake_case__ ( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Any , **__lowercase : List[Any] ):
"""simple docstring"""
self.training_tracker.update(
state.global_step , comment=f"Epoch {int(state.epoch )}/{state.num_train_epochs}" , force_update=__lowercase )
snake_case_ = None
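# A minimal sketch of the in-place rendering trick the bars above rely on:
# `IPython.display.display(..., display_id=True)` returns a handle whose
# `update` replaces the rendered HTML instead of appending new output. Assumes
# a Jupyter/IPython frontend; the real callback adds update throttling on top.
import time
import IPython.display as disp
def demo_progress(total=10):
    handle = None
    for value in range(1, total + 1):
        html = f"<progress value='{value}' max='{total}' style='width:300px;'></progress> {value}/{total}"
        if handle is None:
            handle = disp.display(disp.HTML(html), display_id=True)
        else:
            handle.update(disp.HTML(html))
        time.sleep(0.1)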
| 187 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = None
if token is not None:
snake_case_ = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
snake_case_ = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case_ = requests.get(_A , headers=_A ).json()
snake_case_ = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
snake_case_ = math.ceil((result["total_count"] - 100) / 100 )
for i in range(_A ):
snake_case_ = requests.get(url + f"&page={i + 2}" , headers=_A ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = None
if token is not None:
snake_case_ = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
snake_case_ = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case_ = requests.get(_A , headers=_A ).json()
snake_case_ = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
snake_case_ = math.ceil((result["total_count"] - 100) / 100 )
for i in range(_A ):
snake_case_ = requests.get(url + f"&page={i + 2}" , headers=_A ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def lowerCamelCase__ ( _A , _A , _A , _A ):
'''simple docstring'''
snake_case_ = None
if token is not None:
snake_case_ = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
snake_case_ = requests.get(_A , headers=_A , allow_redirects=_A )
snake_case_ = result.headers["Location"]
snake_case_ = requests.get(_A , allow_redirects=_A )
snake_case_ = os.path.join(_A , f"{artifact_name}.zip" )
with open(_A , "wb" ) as fp:
fp.write(response.content )
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = []
snake_case_ = []
snake_case_ = None
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_A ) as f:
for line in f:
snake_case_ = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case_ = line[: line.index(": " )]
snake_case_ = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
snake_case_ = line[len("FAILED " ) :]
failed_tests.append(_A )
elif filename == "job_name.txt":
snake_case_ = line
if len(_A ) != len(_A ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(_A )} for `errors` "
f"and {len(_A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
" problem." )
snake_case_ = None
if job_name and job_links:
snake_case_ = job_links.get(_A , _A )
# A list with elements of the form (line of error, error, failed test)
snake_case_ = [x + [y] + [job_link] for x, y in zip(_A , _A )]
return result
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = []
snake_case_ = [os.path.join(_A , _A ) for p in os.listdir(_A ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_A , job_links=_A ) )
return errors
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = Counter()
counter.update([x[1] for x in logs] )
snake_case_ = counter.most_common()
snake_case_ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case_ = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    snake_case_ = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = test.split("::" )[0]
if test.startswith("tests/models/" ):
snake_case_ = test.split("/" )[2]
else:
snake_case_ = None
return test
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case_ = [x for x in logs if x[2] is not None]
snake_case_ = {x[2] for x in logs}
snake_case_ = {}
for test in tests:
snake_case_ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case_ = counter.most_common()
snake_case_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case_ = sum(error_counts.values() )
if n_errors > 0:
snake_case_ = {"count": n_errors, "errors": error_counts}
    snake_case_ = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = "| no. | error | status |"
snake_case_ = "|-:|:-|:-|"
snake_case_ = [header, sep]
for error in reduced_by_error:
snake_case_ = reduced_by_error[error]["count"]
snake_case_ = f"| {count} | {error[:100]} | |"
lines.append(_A )
return "\n".join(_A )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = "| model | no. of errors | major error | count |"
snake_case_ = "|-:|-:|-:|-:|"
snake_case_ = [header, sep]
for model in reduced_by_model:
snake_case_ = reduced_by_model[model]["count"]
snake_case_ , snake_case_ = list(reduced_by_model[model]["errors"].items() )[0]
snake_case_ = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(_A )
return "\n".join(_A )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
lowercase__ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowercase__ : Optional[Any] = get_job_links(args.workflow_run_id, token=args.token)
lowercase__ : List[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowercase__ : Tuple = k.find(" / ")
lowercase__ : Union[str, Any] = k[index + len(" / ") :]
lowercase__ : Union[str, Any] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowercase__ : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowercase__ : List[Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowercase__ : str = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowercase__ : Union[str, Any] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowercase__ : int = reduce_by_error(errors)
lowercase__ : List[str] = reduce_by_model(errors)
lowercase__ : Tuple = make_github_table(reduced_by_error)
lowercase__ : Optional[int] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
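# A compact sketch of the pagination pattern this script uses for every GitHub
# API listing: take the first 100-item page, read `total_count`, then fetch
# ceil((total - 100) / 100) extra pages. Running it needs network access and,
# for non-public data, a real token.
import math
import requests
def fetch_all_jobs(workflow_run_id, token=None):
    headers = {"Accept": "application/vnd.github+json"}
    if token is not None:
        headers["Authorization"] = f"Bearer {token}"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    jobs = {job["name"]: job["html_url"] for job in result["jobs"]}
    for i in range(math.ceil((result["total_count"] - 100) / 100)):
        result = requests.get(url + f"&page={i + 2}", headers=headers).json()
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
    return jobs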
| 187 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ =logging.get_logger(__name__)
UpperCamelCase_ ={"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ ={
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
UpperCamelCase_ ={
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
UpperCamelCase_ ="""▁"""
class _a ( _lowerCAmelCase ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any], lowerCAmelCase__ : int, lowerCAmelCase__ : Union[str, Any]="<s>", lowerCAmelCase__ : str="</s>", lowerCAmelCase__ : Optional[Any]="</s>", lowerCAmelCase__ : int="<s>", lowerCAmelCase__ : Union[str, Any]="<unk>", lowerCAmelCase__ : Union[str, Any]="<pad>", lowerCAmelCase__ : Optional[Any]="<mask>", lowerCAmelCase__ : Optional[Dict[str, Any]] = None, **lowerCAmelCase__ : Optional[int], ) -> None:
'''simple docstring'''
_UpperCamelCase : Optional[int] = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__, lowerCAmelCase__ ) else mask_token
_UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase__, )
_UpperCamelCase : Union[str, Any] = vocab_file
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
_UpperCamelCase : Dict = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
_UpperCamelCase : Dict = len(self.sp_model ) - 1
_UpperCamelCase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def snake_case ( self : List[str], lowerCAmelCase__ : List[int], lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Union[str, Any] = [self.cls_token_id]
_UpperCamelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self : Tuple, lowerCAmelCase__ : List[int], lowerCAmelCase__ : Optional[List[int]] = None, lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__, token_ids_a=lowerCAmelCase__, already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def snake_case ( self : Optional[int], lowerCAmelCase__ : List[int], lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case ( self : List[str] ) -> int:
'''simple docstring'''
return len(self.sp_model )
def snake_case ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Dict = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self : Optional[int], lowerCAmelCase__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase__, out_type=lowerCAmelCase__ )
def snake_case ( self : str, lowerCAmelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase : Optional[int] = self.sp_model.PieceToId(lowerCAmelCase__ )
return spm_id if spm_id else self.unk_token_id
def snake_case ( self : Tuple, lowerCAmelCase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(lowerCAmelCase__ )
def snake_case ( self : Optional[Any], lowerCAmelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : int = []
_UpperCamelCase : str = ''''''
_UpperCamelCase : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : Tuple = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
_UpperCamelCase : str = False
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def __getstate__( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : Any = self.__dict__.copy()
_UpperCamelCase : List[str] = None
return state
def __setstate__( self : Any, lowerCAmelCase__ : List[Any] ) -> str:
'''simple docstring'''
_UpperCamelCase : str = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
_UpperCamelCase : Any = {}
_UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self : Union[str, Any], lowerCAmelCase__ : str, lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCamelCase : Optional[Any] = os.path.join(
lowerCAmelCase__, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__, '''wb''' ) as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
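# A plain-list sketch of the RoBERTa-style framing implemented above, using the
# fairseq id table from this file (<s> = 0, </s> = 2): a single sequence becomes
# `<s> A </s>`, a pair becomes `<s> A </s></s> B </s>`.
def build_inputs_sketch(ids_a, ids_b=None):
    cls, sep = [0], [2]
    if ids_b is None:
        return cls + ids_a + sep
    return cls + ids_a + sep + sep + ids_b + sep
assert build_inputs_sketch([10, 11]) == [0, 10, 11, 2]
assert build_inputs_sketch([10, 11], [12]) == [0, 10, 11, 2, 2, 12, 2]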
| 350 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def a_ ( _lowercase ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def a_ ( _lowercase ):
# word like '180' or '身高' or '神'
for char in word:
_UpperCamelCase : Dict = ord(_lowercase )
if not _is_chinese_char(_lowercase ):
return 0
return 1
def a_ ( _lowercase ):
_UpperCamelCase : List[str] = set()
for token in tokens:
_UpperCamelCase : int = len(_lowercase ) > 1 and is_chinese(_lowercase )
if chinese_word:
word_set.add(_lowercase )
_UpperCamelCase : Optional[int] = list(_lowercase )
return word_list
def a_ ( _lowercase , _lowercase ):
if not chinese_word_set:
return bert_tokens
_UpperCamelCase : Tuple = max([len(_lowercase ) for w in chinese_word_set] )
_UpperCamelCase : int = bert_tokens
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = 0, len(_lowercase )
while start < end:
_UpperCamelCase : Union[str, Any] = True
if is_chinese(bert_word[start] ):
_UpperCamelCase : List[Any] = min(end - start , _lowercase )
for i in range(_lowercase , 1 , -1 ):
_UpperCamelCase : str = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_UpperCamelCase : int = '''##''' + bert_word[j]
_UpperCamelCase : int = start + i
_UpperCamelCase : Union[str, Any] = False
break
if single_word:
start += 1
return bert_word
def a_ ( _lowercase , _lowercase , _lowercase ):
_UpperCamelCase : List[Any] = []
for i in range(0 , len(_lowercase ) , 100 ):
_UpperCamelCase : Optional[int] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['''cws'''] ).cws
_UpperCamelCase : Optional[int] = [get_chinese_word(_lowercase ) for r in res]
ltp_res.extend(_lowercase )
assert len(_lowercase ) == len(_lowercase )
_UpperCamelCase : Dict = []
for i in range(0 , len(_lowercase ) , 100 ):
_UpperCamelCase : Optional[int] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowercase , truncation=_lowercase , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(_lowercase ) == len(_lowercase )
_UpperCamelCase : Optional[Any] = []
for input_ids, chinese_word in zip(_lowercase , _lowercase ):
_UpperCamelCase : str = []
for id in input_ids:
_UpperCamelCase : Dict = bert_tokenizer._convert_id_to_token(_lowercase )
input_tokens.append(_lowercase )
_UpperCamelCase : str = add_sub_symbol(_lowercase , _lowercase )
_UpperCamelCase : List[str] = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(_lowercase ):
if token[:2] == "##":
_UpperCamelCase : int = token[2:]
# save chinese tokens' pos
if len(_lowercase ) == 1 and _is_chinese_char(ord(_lowercase ) ):
ref_id.append(_lowercase )
ref_ids.append(_lowercase )
assert len(_lowercase ) == len(_lowercase )
return ref_ids
def a_ ( _lowercase ):
    # For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
    # If we want to fine-tune this model, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp).
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
_UpperCamelCase : Tuple = [line.strip() for line in data if len(_lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_UpperCamelCase : List[Any] = LTP(args.ltp ) # faster in GPU device
_UpperCamelCase : int = BertTokenizer.from_pretrained(args.bert )
_UpperCamelCase : List[str] = prepare_ref(_lowercase , _lowercase , _lowercase )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
_UpperCamelCase : List[Any] = [json.dumps(_lowercase ) + '''\n''' for ref in ref_ids]
f.writelines(_lowercase )
if __name__ == "__main__":
UpperCamelCase_ =argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
UpperCamelCase_ =parser.parse_args()
main(args)
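# A self-contained sketch of the greedy longest-match pass `add_sub_symbol`
# performs above: continuation characters of any LTP-segmented word get a "##"
# prefix, from which the whole-word-masking ref ids are later derived. This is
# simplified (no LTP/BERT, and the `is_chinese` gate is omitted); inputs are toys.
def add_sub_symbol_sketch(bert_tokens, chinese_word_set):
    max_word_len = max(len(w) for w in chinese_word_set)
    bert_word = list(bert_tokens)
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        for i in range(min(end - start, max_word_len), 1, -1):  # try the longest span first
            if "".join(bert_word[start : start + i]) in chinese_word_set:
                for j in range(start + 1, start + i):
                    bert_word[j] = "##" + bert_word[j]
                start += i
                single_word = False
                break
        if single_word:
            start += 1
    return bert_word
assert add_sub_symbol_sketch(["身", "高", "一", "米", "八"], {"身高", "一米八"}) == ["身", "##高", "一", "##米", "##八"]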
| 128 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """num_attention_heads""" ) )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=6_40 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__="silu" , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=None , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = last_hidden_size
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Dict = conv_kernel_size
SCREAMING_SNAKE_CASE__ : int = output_stride
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = classifier_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = use_labels
SCREAMING_SNAKE_CASE__ : str = is_training
SCREAMING_SNAKE_CASE__ : str = num_labels
SCREAMING_SNAKE_CASE__ : Dict = initializer_range
SCREAMING_SNAKE_CASE__ : str = scope
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ (self ) -> str:
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = MobileViTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Dict = MobileViTForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : int = MobileViTForSemanticSegmentation(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Dict = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase : List[str] = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase : str = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Any = False
__UpperCamelCase : int = False
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE__ : Dict = MobileViTConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
pass
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __magic_name__ (self ) -> int:
"""simple docstring"""
pass
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : str = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE__ : Tuple = 2
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Any = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE__ )
@slow
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = MobileViTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[str] = prepare_img()
SCREAMING_SNAKE_CASE__ : int = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : Tuple = model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : str = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=SCREAMING_SNAKE_CASE__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE__ , target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE__ )
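# A quick sketch of the shape arithmetic asserted by the hidden-state test
# above: five feature maps whose spatial size halves per stage, ending
# consistently with output_stride (tester defaults: image_size=32, output_stride=32).
if __name__ == "__main__":
    image_size, num_stages, output_stride = 32, 5, 32
    divisor = 2
    for stage in range(num_stages):
        print(stage, (image_size // divisor, image_size // divisor))  # (16, 16) down to (1, 1)
        divisor *= 2
    assert output_stride == divisor // 2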
| 25 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
snake_case_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 125 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_a = None
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_a = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
_a = {
'camembert-base': 512,
}
_a = '▁'
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE__ : str = CamembertTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=["<s>NOTUSED", "</s>NOTUSED"] , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : str = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
UpperCAmelCase_ : Any = vocab_file
UpperCAmelCase_ : int = False if not self.vocab_file else True
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : str = [self.cls_token_id]
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
UpperCAmelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : Any = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
| 369 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_a = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )]
if identifier is not None:
UpperCAmelCase_ : Dict = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_ ):
for n_ in n_identifier:
UpperCAmelCase_ : str = [file for file in files if n_ not in file]
else:
UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file]
UpperCAmelCase_ : Union[str, Any] = ignore_files or []
ignore_files.append("__init__.py" )
UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , lowercase_ )
if only_modules:
UpperCAmelCase_ : str = file.split("." )[0]
try:
UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ )
UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = Path("src/transformers" )
UpperCAmelCase_ : str = "modeling"
UpperCAmelCase_ : Optional[Any] = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = Path("src/transformers" )
UpperCAmelCase_ : Any = "tokenization"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = "configuration"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"]
self.analyze_directory(lowercase_ , n_identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Path("docs/source" )
UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"]
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
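# A minimal runnable sketch of the doctest-to-unittest bridge used above:
# `doctest.DocTestSuite` collects a module's doctests and `TextTestRunner`
# executes them, so failures can be asserted on like ordinary test results.
import doctest
import unittest
def double(x):
    """
    >>> double(2)
    4
    """
    return 2 * x
if __name__ == "__main__":
    suite = doctest.DocTestSuite()  # doctests from the current module
    result = unittest.TextTestRunner().run(suite)
    assert len(result.failures) == 0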
| 23 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
a__ : List[str] = logging.get_logger(__name__)
a__ : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : Optional[int] = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
a__ : str = {'mobilebert-uncased': 5_1_2}
a__ : int = {}
class lowercase_ ( a__ ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = MobileBertTokenizer
def __init__( self , a=None , a=None , a=True , a="[UNK]" , a="[SEP]" , a="[PAD]" , a="[CLS]" , a="[MASK]" , a=True , a=None , **a , ):
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
UpperCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , a ) != do_lower_case
or normalizer_state.get("strip_accents" , a ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , a ) != tokenize_chinese_chars
):
UpperCamelCase__ = getattr(a , normalizer_state.pop("type" ) )
UpperCamelCase__ = do_lower_case
UpperCamelCase__ = strip_accents
UpperCamelCase__ = tokenize_chinese_chars
UpperCamelCase__ = normalizer_class(**a )
UpperCamelCase__ = do_lower_case
def __a ( self , a , a=None ):
UpperCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , a , a = None ):
UpperCamelCase__ = [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , a , a = None ):
UpperCamelCase__ = self._tokenizer.model.save(a , name=a )
return tuple(a )
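# A plain-list sketch of the segment mask built by
# `create_token_type_ids_from_sequences` above: 0s cover `[CLS] A [SEP]` and
# 1s cover `B [SEP]` (hypothetical ids here: cls=101, sep=102).
def token_type_ids_sketch(ids_a, ids_b=None):
    cls, sep = [101], [102]
    if ids_b is None:
        return len(cls + ids_a + sep) * [0]
    return len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]
assert token_type_ids_sketch([7, 8]) == [0, 0, 0, 0]
assert token_type_ids_sketch([7, 8], [9]) == [0, 0, 0, 0, 1, 1]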
| 80 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    '''Return the sum of the squares of the digits of ``number``.'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, with the chain member 58 being the one which, when declared first,
# gives the least number of iterations for all the members to be checked;
# the other ends with 1 and has only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 arrives at 1
CHAINS[57] = False  # the chain starting at 58 arrives at 89


def chain(number: int) -> bool:
    '''Return True if ``number``'s square-digit chain arrives at 1, else False.'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    '''Count the starting numbers below ``number`` whose chain arrives at 89.'''
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
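# --- Hedged cross-check (not in the original solution): next_number() against
# a naive per-digit implementation.
def _slow_next_number(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))


for _n in (1, 44, 85, 145, 9999, 123456):
    assert next_number(_n) == _slow_next_number(_n)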
| 80 | 1 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
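# --- Hedged sketch (not in the original test): the invariant compute_metrics()
# checks above — the gathered predictions must contain every dataset index
# exactly once, in order.
_toy_preds = [0, 1, 2, 3]
assert _toy_preds == list(range(len(_toy_preds)))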
| 363 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
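# --- Hedged sketch (not part of the pipeline) of the length rounding done in
# __call__ above: the sample count is padded up to a multiple of
# 2 ** len(up_blocks) so every down block can halve the resolution. The
# numbers below are made up.
_rate, _seconds, _factor = 16_000, 1.0001, 2 ** 4
_original = int(_seconds * _rate)
_padded = _original if _original % _factor == 0 else (int(_seconds * _rate) // _factor + 1) * _factor
assert _padded % _factor == 0 and _padded >= _original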
| 132 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(UpperCAmelCase ):
_UpperCAmelCase = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = FlaxAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(UpperCAmelCase ):
_UpperCAmelCase = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = FlaxAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_UpperCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase )
_UpperCAmelCase = FlaxBertModel.from_pretrained(UpperCAmelCase )
_UpperCAmelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCAmelCase ):
return model(**UpperCAmelCase )
eval(**UpperCAmelCase ).block_until_ready()
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
_UpperCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase )
_UpperCAmelCase = FlaxRobertaModel.from_pretrained(UpperCAmelCase )
_UpperCAmelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCAmelCase ):
return model(**UpperCAmelCase )
eval(**UpperCAmelCase ).block_until_ready()
def UpperCamelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
_UpperCAmelCase = FlaxAutoModel.from_pretrained('bert-base' )
def UpperCamelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_UpperCAmelCase = FlaxAutoModel.from_pretrained(UpperCAmelCase , revision='aaaaaa' )
def UpperCamelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
_UpperCAmelCase = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(UpperCAmelCase , 'Use `from_pt=True` to load this model' ):
_UpperCAmelCase = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
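# --- Hedged sketch (not from the test file) of the jit pattern used above:
# wrap the forward pass once, then call block_until_ready() so JAX's async
# dispatch finishes before the result is inspected.
if is_flax_available():
    import jax.numpy as jnp

    @jax.jit
    def _double(x):
        return x * 2  # stand-in for model(**inputs)

    _double(jnp.ones((1, 1))).block_until_ready()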
| 39 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(' ', 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(' ', 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'')
def rewrite_dict_keys(d):
    # (1) strip the fairseq "@@" word-breaking suffix, (2) add "</w>" where the word is not broken up
    da = dict((re.sub(R'@@$', '', k), v) if k.endswith('@@') else (re.sub(R'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[F"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(F"""Writing results to {pytorch_dump_folder_path}""")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(F"""path to the file {checkpoint_file} does not exist!""")
    chkpt = torch.load(checkpoint_file, map_location='cpu')

    args = chkpt['cfg']['model']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt')
    if not os.path.isfile(dict_file):
        raise ValueError(F"""path to the file {dict_file} does not exist!""")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file'])
    print(F"""Generating {src_vocab_file} of {src_vocab_size} records""")
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(F"""path to the file {bpecodes_file} does not exist!""")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')

    model_conf = {
        'activation_dropout': args['activation_dropout'],
        'architectures': ['BioGptForCausalLM'],
        'attention_probs_dropout_prob': args['attention_dropout'],
        'bos_token_id': 0,
        'eos_token_id': 2,
        'hidden_act': args['activation_fn'],
        'hidden_dropout_prob': args['dropout'],
        'hidden_size': args['decoder_embed_dim'],
        'initializer_range': 0.02,
        'intermediate_size': args['decoder_ffn_embed_dim'],
        'layer_norm_eps': 1E-12,
        'layerdrop': args['decoder_layerdrop'],
        'max_position_embeddings': args['max_target_positions'],
        'model_type': 'biogpt',
        'num_attention_heads': args['decoder_attention_heads'],
        'num_hidden_layers': args['decoder_layers'],
        'pad_token_id': 1,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_decoder_input_output_embed'],
        'vocab_size': src_vocab_size,
    }

    # good hparam defaults to start with
    print(F"""Generating {biogpt_model_config_file}""")
    with open(biogpt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        'bos_token': '<s>',
        'eos_token': '</s>',
        'model_max_length': 1024,
        'pad_token': '<pad>',
        'special_tokens_map_file': None,
        'tokenizer_class': 'BioGptTokenizer',
        'unk_token': '<unk>',
    }

    print(F"""Generating {biogpt_tokenizer_config_file}""")
    with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt['model']

    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight'):
            model_state_dict[layer_name.replace('decoder.', '')] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace('decoder', 'biogpt')] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(F"""Generating {pytorch_weights_dump_path}""")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('Conversion is done!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--biogpt_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help=(
            'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
            ' bpecodes, etc.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
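# --- Hedged illustration (not part of the conversion script): what
# rewrite_dict_keys() does to a toy fairseq vocab — "@@" continuation markers
# are stripped, word-final tokens gain "</w>", and special tokens are restored.
_toy_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel@@": 4, "lo": 5}
assert rewrite_dict_keys(_toy_vocab) == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel": 4, "lo</w>": 5}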
| 39 | 1 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    '''
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) actually took a training step.
    '''

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
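# --- Hedged usage sketch (not part of the original file): wrapping a plain
# PyTorch scheduler. With step_with_optimizer=False, step() above forwards
# directly, so no distributed state is consulted. Assumes GradientState() can
# be constructed outside an Accelerator (true for recent accelerate versions).
import torch

_model = torch.nn.Linear(2, 2)
_opt = torch.optim.SGD(_model.parameters(), lr=0.1)
_sched = torch.optim.lr_scheduler.StepLR(_opt, step_size=10)
_wrapped = AcceleratedScheduler(_sched, _opt, step_with_optimizer=False)
_opt.step()
_wrapped.step()  # forwards straight to StepLR.step()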
| 325 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
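# --- Hedged check (not in the original file): with the sample scores from
# main() above and height log2(8) = 3, alternating max/min play gives the max
# player 65.
assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65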
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 325 | 1 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP, and normalizes /
    denormalizes image embeddings with them.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
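# --- Hedged check (not in the original file): scale() and unscale() above are
# inverses up to floating point error.
_normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
_x = torch.randn(2, 4)
assert torch.allclose(_normalizer.unscale(_normalizer.scale(_x)), _x, atol=1e-5)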
| 55 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
def UpperCamelCase ( self,**__lowerCamelCase ):
A__ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**__lowerCamelCase )
return config
def UpperCamelCase ( self,__lowerCamelCase=0,**__lowerCamelCase ):
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop('''num_inference_steps''',__lowerCamelCase )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A__ = scheduler_class.from_pretrained(__lowerCamelCase )
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ , A__ = sample, sample
for t in range(__lowerCamelCase,time_step + scheduler.config.solver_order + 1 ):
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = new_scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self,__lowerCamelCase=0,**__lowerCamelCase ):
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop('''num_inference_steps''',__lowerCamelCase )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
A__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A__ = scheduler_class.from_pretrained(__lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
A__ = new_scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,**__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase ( self,__lowerCamelCase=None,**__lowerCamelCase ):
if scheduler is None:
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
return sample
def UpperCamelCase ( self ):
A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A__ = 50
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCamelCase ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def UpperCamelCase ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A__ = self.full_loop(scheduler=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
A__ = DEISMultistepScheduler.from_config(scheduler.config )
A__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
A__ = UniPCMultistepScheduler.from_config(scheduler.config )
A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A__ = self.full_loop(scheduler=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCamelCase ( self ):
self.check_over_configs(thresholding=__lowerCamelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__lowerCamelCase,prediction_type=__lowerCamelCase,sample_max_value=__lowerCamelCase,algorithm_type='''dpmsolver++''',solver_order=__lowerCamelCase,solver_type=__lowerCamelCase,)
def UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def UpperCamelCase ( self ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__lowerCamelCase,solver_type=__lowerCamelCase,prediction_type=__lowerCamelCase,algorithm_type=__lowerCamelCase,)
A__ = self.full_loop(
solver_order=__lowerCamelCase,solver_type=__lowerCamelCase,prediction_type=__lowerCamelCase,algorithm_type=__lowerCamelCase,)
assert not torch.isnan(__lowerCamelCase ).any(), "Samples have nan numbers"
def UpperCamelCase ( self ):
self.check_over_configs(lower_order_final=__lowerCamelCase )
self.check_over_configs(lower_order_final=__lowerCamelCase )
def UpperCamelCase ( self ):
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCamelCase ( self ):
self.check_over_configs(variance_type=__lowerCamelCase )
self.check_over_configs(variance_type='''learned_range''' )
def UpperCamelCase ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__lowerCamelCase,time_step=0 )
def UpperCamelCase ( self ):
A__ = self.full_loop()
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop(use_karras_sigmas=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop(prediction_type='''v_prediction''' )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop(prediction_type='''v_prediction''',use_karras_sigmas=__lowerCamelCase )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(thresholding=__lowerCamelCase,dynamic_thresholding_ratio=0 )
A__ = scheduler_class(**__lowerCamelCase )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
        assert sample.dtype == torch.float16
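# --- Hedged sketch (not part of the test class): the save_config /
# from_pretrained round-trip the tests above rely on.
_sched = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
with tempfile.TemporaryDirectory() as _tmp:
    _sched.save_config(_tmp)
    _reloaded = DPMSolverSinglestepScheduler.from_pretrained(_tmp)
assert _reloaded.config.num_train_timesteps == 1000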
| 193 | 0 |
"""simple docstring"""
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            'Correct format for using this script: '
            'python fractals.py <int:depth_for_fractal>'
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('red')

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
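# --- Hedged check (not in the original script): get_mid() above is a plain
# midpoint computation.
assert get_mid((0.0, 0.0), (2.0, 4.0)) == (1.0, 2.0)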
| 69 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
    print(f"{solution() = }")
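# --- Hedged check (not in the original solution): a length-3 row admits the
# mixed tilings 1+1+1, 1+2, 2+1 and 3, and a length-4 row admits 8.
assert solution(3) == 4
assert solution(4) == 8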
| 69 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=4 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_attention_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_choices
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_attention_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
@slow
    def test_models_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 55 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 314 | 0 |
'''simple docstring'''
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = 'philschmid/bart-large-cnn-samsum'
    description = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    name = 'summarizer'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ['text']
    outputs = ['text']

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 358 |
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test for the Mersenne number 2**p - 1.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
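# --- Hedged check (not in the original file): 2**7 - 1 = 127 is prime while
# 2**11 - 1 = 2047 = 23 * 89 is composite, so the calls above print True, False.
assert lucas_lehmer_test(7) is True
assert lucas_lehmer_test(11) is False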
| 136 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 89 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = 'mask2former'
    backbones_supported = ['swin']
    attribute_map = {'hidden_size': 'hidden_dim'}
    def __init__(self, backbone_config: Optional[Dict] = None, feature_size: int = 256, mask_feature_size: int = 256, hidden_dim: int = 256, encoder_feedforward_dim: int = 1024, activation_function: str = "relu", encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0, dim_feedforward: int = 2048, pre_norm: bool = False, enforce_input_projection: bool = False, common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 12544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True, feature_strides: List[int] = [4, 8, 16, 32], output_auxiliary_logits: bool = None, **kwargs):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}")

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 73 | 0 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowercase__(A , A ) ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any]= XCLIPTextConfig()
# derive patch size from model name
lowercase__ : List[Any]= model_name.find("patch" )
lowercase__ : List[Any]= int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] )
lowercase__ : Optional[Any]= XCLIPVisionConfig(patch_size=A , num_frames=A )
if "large" in model_name:
lowercase__ : List[Any]= 768
lowercase__ : Any= 3_072
lowercase__ : Optional[int]= 12
lowercase__ : Optional[int]= 1_024
lowercase__ : Optional[int]= 4_096
lowercase__ : Dict= 16
lowercase__ : Union[str, Any]= 24
lowercase__ : str= 768
lowercase__ : Any= 3_072
if model_name == "xclip-large-patch14-16-frames":
lowercase__ : str= 336
lowercase__ : Union[str, Any]= XCLIPConfig.from_text_vision_configs(A , A )
if "large" in model_name:
lowercase__ : Optional[Any]= 768
return config
def lowercase__(A ) ->Union[str, Any]:
"""simple docstring"""
if name == "token_embedding.weight":
lowercase__ : str= name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight" )
if name == "positional_embedding":
lowercase__ : Any= name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "ln_1" in name:
lowercase__ : Any= name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowercase__ : Optional[int]= name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowercase__ : List[str]= name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowercase__ : Tuple= name.replace("c_proj" , "fc2" )
if name.startswith("transformer.resblocks" ):
lowercase__ : str= name.replace("transformer.resblocks" , "text_model.encoder.layers" )
if "attn.out_proj" in name and "message" not in name:
lowercase__ : int= name.replace("attn.out_proj" , "self_attn.out_proj" )
if "ln_final" in name:
lowercase__ : List[str]= name.replace("ln_final" , "text_model.final_layer_norm" )
# visual encoder
if name == "visual.class_embedding":
lowercase__ : int= name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding" )
if name == "visual.positional_embedding":
lowercase__ : Dict= name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight" )
if name.startswith("visual.transformer.resblocks" ):
lowercase__ : List[str]= name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers" )
if "visual.conv1" in name:
lowercase__ : Dict= name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding" )
if "visual.ln_pre" in name:
lowercase__ : Any= name.replace("visual.ln_pre" , "vision_model.pre_layernorm" )
if "visual.ln_post" in name:
lowercase__ : Optional[int]= name.replace("visual.ln_post" , "vision_model.post_layernorm" )
if "visual.proj" in name:
lowercase__ : int= name.replace("visual.proj" , "visual_projection.weight" )
if "text_projection" in name:
lowercase__ : List[str]= name.replace("text_projection" , "text_projection.weight" )
# things on top
if "prompts_visual_proj" in name:
lowercase__ : List[str]= name.replace("prompts_visual_proj" , "prompts_visual_projection" )
if "prompts_visual_ln" in name:
lowercase__ : Optional[Any]= name.replace("prompts_visual_ln" , "prompts_visual_layernorm" )
# mit
if name == "mit.positional_embedding":
lowercase__ : List[Any]= name.replace("positional" , "position" )
if name.startswith("mit.resblocks" ):
lowercase__ : List[str]= name.replace("mit.resblocks" , "mit.encoder.layers" )
# prompts generator
if name.startswith("prompts_generator.norm" ):
lowercase__ : int= name.replace("prompts_generator.norm" , "prompts_generator.layernorm" )
return name
def lowercase__(A , A ) ->Tuple:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ : List[str]= orig_state_dict.pop(A )
if "attn.in_proj" in key:
lowercase__ : Optional[Any]= key.split("." )
if key.startswith("visual" ):
lowercase__ : Tuple= key_split[3]
lowercase__ : Optional[Any]= config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowercase__ : Optional[int]= val[
:dim, :
]
lowercase__ : List[str]= val[
dim : dim * 2, :
]
lowercase__ : Union[str, Any]= val[
-dim:, :
]
else:
lowercase__ : Tuple= val[
:dim
]
lowercase__ : Optional[int]= val[
dim : dim * 2
]
lowercase__ : int= val[
-dim:
]
else:
if "weight" in key:
lowercase__ : Any= val[
:dim, :
]
lowercase__ : Tuple= val[
dim : dim * 2, :
]
lowercase__ : List[Any]= val[
-dim:, :
]
else:
lowercase__ : Optional[int]= val[:dim]
lowercase__ : Dict= val[
dim : dim * 2
]
lowercase__ : str= val[-dim:]
elif key.startswith("mit" ):
lowercase__ : str= key_split[2]
lowercase__ : List[Any]= config.vision_config.mit_hidden_size
if "weight" in key:
lowercase__ : str= val[:dim, :]
lowercase__ : Tuple= val[dim : dim * 2, :]
lowercase__ : Tuple= val[-dim:, :]
else:
lowercase__ : Dict= val[:dim]
lowercase__ : Any= val[dim : dim * 2]
lowercase__ : Optional[Any]= val[-dim:]
else:
lowercase__ : Optional[Any]= key_split[2]
lowercase__ : Union[str, Any]= config.text_config.hidden_size
if "weight" in key:
lowercase__ : Dict= val[:dim, :]
lowercase__ : Optional[int]= val[
dim : dim * 2, :
]
lowercase__ : Dict= val[-dim:, :]
else:
lowercase__ : Dict= val[:dim]
lowercase__ : Dict= val[
dim : dim * 2
]
lowercase__ : List[Any]= val[-dim:]
else:
lowercase__ : Dict= rename_key(A )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowercase__ : Tuple= val.T
lowercase__ : Optional[Any]= val
return orig_state_dict
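# --- Hedged sketch (not from the conversion script) of the fused-QKV split
# performed above: an in_proj weight of shape (3*dim, dim) is cut into equal
# query/key/value blocks.
_dim = 4
_fused = torch.randn(3 * _dim, _dim)
_q, _k, _v = _fused[:_dim], _fused[_dim : 2 * _dim], _fused[-_dim:]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)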
def lowercase__(A ) ->Optional[int]:
"""simple docstring"""
if num_frames == 8:
lowercase__ : Optional[Any]= "eating_spaghetti_8_frames.npy"
elif num_frames == 16:
lowercase__ : Optional[Any]= "eating_spaghetti.npy"
elif num_frames == 32:
lowercase__ : Union[str, Any]= "eating_spaghetti_32_frames.npy"
lowercase__ : Union[str, Any]= hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename=A , repo_type="dataset" , )
lowercase__ : Optional[int]= np.load(A )
return list(A )
def lowercase__(A , A=None , A=False ) ->int:
"""simple docstring"""
lowercase__ : str= {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
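    # After conversion, the checkpoint loads like any other transformers model.
    # Minimal inference sketch (commented out so the script stays conversion-only;
    # the folder is whatever was passed as --pytorch_dump_folder_path):
    #
    # from transformers import XCLIPModel, XCLIPProcessor
    # model = XCLIPModel.from_pretrained(args.pytorch_dump_folder_path)
    # processor = XCLIPProcessor.from_pretrained(args.pytorch_dump_folder_path)
    # inputs = processor(text=["playing sports"], videos=prepare_video(8), return_tensors="pt", padding=True)
    # probs = model(**inputs).logits_per_video.softmax(dim=1)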
| 150 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
        self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
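

if __name__ == "__main__":
    # Minimal sampling sketch. Assumes the public "facebook/DiT-XL-2-256"
    # checkpoint and a CUDA device; both are illustrative assumptions, not
    # part of this module.
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")

    class_ids = pipe.get_label_ids(["white shark", "umbrella"])
    generator = torch.manual_seed(33)
    output = pipe(class_labels=class_ids, generator=generator, num_inference_steps=25)
    output.images[0].save("dit_sample.png")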
| 150 | 1 |
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one means subtraction, e.g. IV == 4
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
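
    # quick sanity checks in addition to the doctests; the values follow
    # directly from the conversion rules above
    assert roman_to_int("MMXXIII") == 2023
    assert int_to_roman(2023) == "MMXXIII"
    assert roman_to_int(int_to_roman(1994)) == 1994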
| 306 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
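

def _remap_vocab_example() -> None:
    # Tiny, self-contained illustration (toy data, not from a real XLM
    # checkpoint) of the vocab rewrite above: BPE continuation tokens lose
    # their "@@" marker, and every other token past the first 14 special ids
    # gains an explicit end-of-word marker "</w>".
    vocab = {"<s>": 0, "hel@@": 14, "lo": 15}
    remapped = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
    assert remapped == {"<s>": 0, "hel": 14, "lo</w>": 15}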
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 133 | 0 |
alphabet_size = 256

# Modulus to hash a string
modulus = 1000003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
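
    # Worked rolling-hash step (base 256, modulus 1000003), matching the update
    # inside rabin_karp: drop the leading char, shift by the base, add the new char.
    assert (ord("a") * 256 + ord("b")) % 1000003 == 24930  # hash of "ab"
    assert ((24930 - ord("a") * 256) * 256 + ord("c")) % 1000003 == (ord("b") * 256 + ord("c")) % 1000003  # -> "bc"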
| 358 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
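
    # Small end-to-end run on a toy menu: pick greedily by raw value under a
    # total weight budget of 60 (only the pizza fits).
    menu = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 60, 40])
    chosen, total = greedy(menu, 60.0, Things.get_value)
    print(chosen, total)  # [Things(Pizza, 100, 60)] 100.0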
| 207 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 64 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
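
    # Quick smoke test of the assembled pipeline (commented out so the converter
    # stays side-effect free; the CUDA device and the sample image path are
    # illustrative assumptions):
    #
    # from PIL import Image
    # pipe = UnCLIPImageVariationPipeline.from_pretrained(args.dump_path).to("cuda")
    # variations = pipe(Image.open("image.png"), num_images_per_prompt=2).images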
| 128 | 0 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 0 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    # round the cube root before cubing it back: comparing the raw float
    # (e.g. 64 ** (1 / 3) == 3.9999999999999996) would wrongly return False
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 0 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 49 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000) -> bool:
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
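
    # sanity checks on known values: 97 is prime; 561 = 3 * 11 * 17 is a
    # Carmichael number that fools plain Fermat tests but not Miller-Rabin
    assert is_prime_big(97)
    assert not is_prime_big(561)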
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 355 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    """Image processor: optional resize, center crop, rescale and normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
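

if __name__ == "__main__":
    # Smoke test on a random image (toy data): resize so the short side is
    # 256 (= 224 * 256/224), center-crop to 224x224, rescale to [0, 1] and
    # normalize with the ImageNet statistics.
    image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
    processor = LevitImageProcessor()
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)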
| 77 | 0 |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
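

if __name__ == "__main__":
    # Tiny demonstration on made-up data: min-max scaling maps the values into
    # [0, 1]; z-score standardization centers them at 0 with unit sample stdev.
    data = [2.0, 4.0, 6.0, 8.0, 10.0]
    print(normalization(data))  # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(data))  # [-1.265, -0.632, 0.0, 0.632, 1.265]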
| 328 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 132 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.auto.tqdm` that optionally renders only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # only the local main process should render the progress bar
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 63 |
import re
def dna(dna: str) -> str:
    """
    Return the complementary DNA strand.

    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 1 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
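

# Minimal sketch of how this wrapper is normally obtained: `Accelerator.prepare`
# constructs it internally, so user code rarely instantiates it directly. The
# names below (model, optimizer, dataloader, scheduler) are placeholders.
#
# from accelerate import Accelerator
#
# accelerator = Accelerator()
# model, optimizer, dataloader, scheduler = accelerator.prepare(model, optimizer, dataloader, scheduler)
# # `scheduler` is now an AcceleratedScheduler: it only steps when the wrapped
# # optimizer actually stepped (e.g. it skips steps dropped by grad scaling).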
| 325 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: Optional[int]) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
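

if __name__ == "__main__":
    # Self-contained smoke test of the checksum helper above (toy values only).
    expected = {"http://a": {"num_bytes": 1, "checksum": "abc"}}
    verify_checksums(expected, {"http://a": {"num_bytes": 1, "checksum": "abc"}})  # passes silently
    try:
        verify_checksums(expected, {"http://a": {"num_bytes": 2, "checksum": "xyz"}})
    except NonMatchingChecksumError as err:
        print(err)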
| 325 | 1 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
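
    # hand check: C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12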
| 369 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 71 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 69 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 69 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self) -> None:
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self) -> None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self) -> None:
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer(self) -> BigBirdTokenizer:
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    @slow
    def test_tokenization_base_easy_symbols(self) -> None:
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self) -> None:
        symbols = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self) -> None:
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_special_tokens(self) -> None:
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
snake_case : Dict = {"""input_ids""": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `snake_case` is the expected-encoding dict assigned above
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case, model_name="google/bigbird-roberta-base", revision="215c99f1600e06f83acce68422f2035b2b5c3510", )
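    # Quick usage sketch mirroring the slow tests above (requires network access):
    # tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    # tokenizer.encode("Hello World!")  # -> [65, 18536, 2260, 101, 66]; 65/66 are [CLS]/[SEP]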
| 364 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ] )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet", )
        return unet
    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet_class_cond", )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self) -> None:
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond(self) -> None:
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep(self) -> None:
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self) -> None:
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self) -> None:
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self) -> None:
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self) -> None:
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self) -> None:
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
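    # Minimal end-to-end sketch mirroring the slow tests above (downloads the
    # imagenet64 consistency-distillation weights; values are illustrative):
    # unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
    # scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
    # pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
    # image = pipe(num_inference_steps=1, timesteps=None).images[0]  # one-step sampling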
| 176 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowercase_ : Union[str, Any] = "__DUMMY_TRANSFORMERS_USER__"
lowercase_ : Dict = "Dummy User"
lowercase_ : Optional[int] = "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"
lowercase_ : Tuple = "https://hub-ci.huggingface.co"
lowercase_ : Optional[Any] = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
lowercase_ : Tuple = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
lowercase_ : int = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)
@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)
    return _temporary_repo
@pytest.fixture(scope="session" )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = f"""repo_txt_data-{int(time.time() * 1_0e3 )}"""
_UpperCAmelCase = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="dataset" , private=__lowerCAmelCase )
hf_api.upload_file(
token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo="data/text_data.txt" , repo_id=__lowerCAmelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"""
_UpperCAmelCase = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="dataset" , private=__lowerCAmelCase )
hf_api.upload_file(
token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo="data.zip" , repo_id=__lowerCAmelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}"""
_UpperCAmelCase = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="dataset" , private=__lowerCAmelCase )
hf_api.upload_file(
token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo="data.zip" , repo_id=__lowerCAmelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
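# Sketch of a test consuming these fixtures via pytest's dependency injection
# (assumes `load_dataset` from the `datasets` library; the upload fixtures above
# inject the freshly created private repo id as the argument value):
# def test_load_private_text_dataset(hf_private_dataset_repo_txt_data, hf_token):
#     ds = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
#     assert "train" in ds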
| 133 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                """The hidden size is not divisble by the number of attention heads! Make sure to update them!""")
    def _rope_scaling_validation(self):
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                F'''got {self.rope_scaling}''')
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''')
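    # Illustrative behaviour of the validation above (a sketch, not from the original file):
    # GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
    # GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})  # ValueError: factor must be a float > 1
    # GPTNeoXConfig(rope_scaling={"type": "ntk", "factor": 2.0})     # ValueError: type must be 'linear' or 'dynamic'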
| 136 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    '''simple docstring'''
    # Construct the model config, then the model itself
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
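# Example invocation (paths are placeholders; the script file name is an assumption):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-ckpt \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch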
| 277 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["""hidden_states"""][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["""hidden_states"""][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)
    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)
    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @unittest.skip(reason="""Feed forward chunking is not implemented""")
    def test_feed_forward_chunking(self):
        pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Any ) -> Dict:
UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_= ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase_= ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_= GPTNeoXModel(__UpperCAmelCase )
original_model.to(__UpperCAmelCase )
original_model.eval()
UpperCAmelCase_= original_model(__UpperCAmelCase ).last_hidden_state
UpperCAmelCase_= original_model(__UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_= {"""type""": scaling_type, """factor""": 10.0}
UpperCAmelCase_= GPTNeoXModel(__UpperCAmelCase )
scaled_model.to(__UpperCAmelCase )
scaled_model.eval()
UpperCAmelCase_= scaled_model(__UpperCAmelCase ).last_hidden_state
UpperCAmelCase_= scaled_model(__UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_model_generation(self):
        tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer("""My favorite food is""", return_tensors="""pt""").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str, expected_output)
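    # Equivalent high-level sketch of the generation check above (assumes the
    # standard transformers text-generation pipeline; not part of the original file):
    # from transformers import pipeline
    # generator = pipeline("text-generation", model="EleutherAI/pythia-410m-deduped")
    # generator("My favorite food is", do_sample=False, max_new_tokens=20)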
| 277 | 1 |
"""simple docstring"""
class TrieNode:
    """simple docstring"""
    def __init__(self):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self, words: list):
        for word in words:
            self.insert(word)
    def insert(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self, word: str):
        def _delete(curr: 'TrieNode', word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr
        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=' ')
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = 'banana bananas bandana band apple all beast'.split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find('banana')
    assert not root.find('bandanas')
    assert not root.find('apps')
    assert root.find('apple')
    assert root.find('all')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), 'works!' if passes else 'doesn\'t work :(')
def pytests() -> None:
    assert test_trie()
def main() -> None:
    print_results('Testing trie functionality', test_trie())
if __name__ == "__main__":
    main()
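# Quick usage sketch (mirrors test_trie above): shared prefixes live on the same
# node path, so deleting one word never corrupts another.
# root = TrieNode()
# root.insert_many(['band', 'bandana'])
# root.find('band')     # True
# root.delete('band')
# root.find('bandana')  # still True: only the 'band' leaf flag is cleared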
| 150 | """simple docstring"""
import argparse
import copy
def generate_neighbours(path) -> dict:
    """simple docstring"""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """simple docstring"""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """simple docstring"""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """simple docstring"""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    """simple docstring"""
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
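# Example input file assumed by generate_neighbours/generate_first_solution:
# undirected weighted edges, one per line as "node_a node_b distance", with the
# file's first character doubling as the start node, e.g.:
#   a b 20
#   a c 18
#   b c 10
# Then: python tabu_search.py -f graph.txt -i 100 -s 5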
| 150 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['audio_values', 'audio_mask']
    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', ).T
    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        """simple docstring"""
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs, ) -> BatchFeature:
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
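    # Minimal usage sketch (shapes follow the padding logic above; values are illustrative):
    # import numpy as np
    # extractor = TvltFeatureExtractor()
    # audio = np.random.randn(44100).astype(np.float32)  # 1 s of mono audio at 44.1 kHz
    # batch = extractor(audio, sampling_rate=44100, return_tensors='np')
    # batch['audio_values'].shape  # (1, 1, max_time_len, 128)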
| 185 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :List[Any] = ['''BeitFeatureExtractor''']
lowerCAmelCase__ :Optional[Any] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 185 | 1 |
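The `__init__.py` above follows the transformers lazy-import convention: the import structure is declared as plain strings, optional backends (vision, torch, flax) are probed with try/except, and at runtime `_LazyModule` resolves attributes on first access. A stripped-down sketch of that idea (not the actual `_LazyModule` implementation, which additionally handles `__dir__`, module specs, and error reporting):

```python
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        # relative import only runs the first time the attribute is touched
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the heavy import happens once
        return value
```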
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list, k: int) -> int:
    '''simple docstring'''
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input')
    current_sum = sum(array[:k])
    max_sum = current_sum
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 3 |
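The function above is the classic fixed-size sliding window: instead of recomputing each length-`k` sum from scratch (O(n·k)), the window sum is updated in O(1) by subtracting the element that leaves and adding the one that enters, for O(n) total. A tiny worked example using the function defined above:

```python
# array = [4, 1, 2, 7, 3], k = 2
# window sums: 4+1=5, then 5-4+2=3, then 3-1+7=9, then 9-2+3=10 -> maximum is 10
assert max_sum_in_array([4, 1, 2, 7, 3], 2) == 10
```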
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
A__ : Optional[Any] = logging.get_logger(__name__)
def a ( lowerCamelCase_ , lowerCamelCase_=False ):
'''simple docstring'''
lowercase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowercase__ = ''''''
else:
lowercase__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowercase__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[
: config.hidden_size, :
]
lowercase__ = in_proj_bias[: config.hidden_size]
lowercase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ = in_proj_bias[-config.hidden_size :]
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dct.pop(lowerCamelCase_ )
lowercase__ = val
def a ( ):
'''simple docstring'''
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return im
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = ViTConfig()
lowercase__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowercase__ = True
lowercase__ = int(vit_name[-12:-10] )
lowercase__ = int(vit_name[-9:-6] )
else:
lowercase__ = 1000
lowercase__ = '''huggingface/label-files'''
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = int(vit_name[-6:-4] )
lowercase__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
lowercase__ = 192
lowercase__ = 768
lowercase__ = 12
lowercase__ = 3
elif vit_name[9:].startswith('''small''' ):
lowercase__ = 384
lowercase__ = 1536
lowercase__ = 12
lowercase__ = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
lowercase__ = 768
lowercase__ = 2304
lowercase__ = 8
lowercase__ = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
lowercase__ = 1024
lowercase__ = 4096
lowercase__ = 24
lowercase__ = 16
elif vit_name[4:].startswith('''huge''' ):
lowercase__ = 1280
lowercase__ = 5120
lowercase__ = 32
lowercase__ = 16
# load original model from timm
lowercase__ = timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase__ = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCamelCase_ )
lowercase__ = create_rename_keys(lowerCamelCase_ , lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowercase__ = ViTModel(lowerCamelCase_ ).eval()
else:
lowercase__ = ViTForImageClassification(lowerCamelCase_ ).eval()
model.load_state_dict(lowerCamelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowercase__ = DeiTImageProcessor(size=config.image_size )
else:
lowercase__ = ViTImageProcessor(size=config.image_size )
lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowercase__ = encoding['''pixel_values''']
lowercase__ = model(lowerCamelCase_ )
if base_model:
lowercase__ = timm_model.forward_features(lowerCamelCase_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCamelCase_ , outputs.pooler_output , atol=1e-3 )
else:
lowercase__ = timm_model(lowerCamelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase_ , outputs.logits , atol=1e-3 )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
A__ : str = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 207 | 0 |
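A recurring trick in the conversion script above is `read_in_q_k_v`: timm stores self-attention as one fused `qkv` projection of shape `[3 * hidden_size, hidden_size]`, while the HF model expects separate query/key/value weights, so the fused matrix is sliced into thirds along the first dimension. The slicing in isolation (a hypothetical helper, torch tensors assumed):

```python
import torch

def split_qkv(qkv_weight: torch.Tensor, qkv_bias: torch.Tensor, hidden: int):
    # fused layout is [q; k; v] stacked along dim 0
    q_w, k_w, v_w = qkv_weight[:hidden], qkv_weight[hidden : 2 * hidden], qkv_weight[2 * hidden :]
    q_b, k_b, v_b = qkv_bias[:hidden], qkv_bias[hidden : 2 * hidden], qkv_bias[2 * hidden :]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)
```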
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
a : Dict = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase )
class a ( __lowerCamelCase ):
def __init__( self : Any , *lowercase_ : Tuple , **lowercase_ : Dict ):
super().__init__(*__lowercase , **__lowercase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def A_ ( self : Optional[Any] , lowercase_ : Optional[Any]=None , lowercase_ : List[str]=None , lowercase_ : str=None ):
snake_case_ = {}
snake_case_ = {}
if prompt is not None:
snake_case_ = prompt
if generate_kwargs is not None:
snake_case_ = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
snake_case_ = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
snake_case_ = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Any , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : List[Any] ):
return super().__call__(__lowercase , **__lowercase )
def A_ ( self : str , lowercase_ : List[str] , lowercase_ : str=None ):
snake_case_ = load_image(__lowercase )
if prompt is not None:
if not isinstance(__lowercase , __lowercase ):
raise ValueError(
F"Received an invalid text input, got - {type(__lowercase )} - but expected a single string. "
'''Note also that one single text can be provided for conditional image to text generation.''' )
snake_case_ = self.model.config.model_type
if model_type == "git":
snake_case_ = self.image_processor(images=__lowercase , return_tensors=self.framework )
snake_case_ = self.tokenizer(text=__lowercase , add_special_tokens=__lowercase ).input_ids
snake_case_ = [self.tokenizer.cls_token_id] + input_ids
snake_case_ = torch.tensor(__lowercase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
snake_case_ = self.image_processor(images=__lowercase , header_text=__lowercase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
snake_case_ = self.image_processor(images=__lowercase , return_tensors=self.framework )
snake_case_ = self.tokenizer(__lowercase , return_tensors=self.framework )
model_inputs.update(__lowercase )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
snake_case_ = self.image_processor(images=__lowercase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
snake_case_ = None
return model_inputs
def A_ ( self : List[Any] , lowercase_ : str , lowercase_ : int=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , __lowercase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
snake_case_ = None
if generate_kwargs is None:
snake_case_ = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
snake_case_ = model_inputs.pop(self.model.main_input_name )
snake_case_ = self.model.generate(__lowercase , **__lowercase , **__lowercase )
return model_outputs
def A_ ( self : Optional[int] , lowercase_ : Union[str, Any] ):
snake_case_ = []
for output_ids in model_outputs:
snake_case_ = {
'''generated_text''': self.tokenizer.decode(
__lowercase , skip_special_tokens=__lowercase , )
}
records.append(__lowercase )
return records
| 350 |
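The class above implements the image-to-text pipeline's three-stage contract (`preprocess` -> `_forward` -> `postprocess`), with model-specific branches for GIT, Pix2Struct, and vision-encoder-decoder checkpoints. Typical usage, sketched with a commonly used public captioning checkpoint (assuming it is still available on the Hub):

```python
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("photo.jpg"))                     # [{'generated_text': '...'}]
print(captioner("photo.jpg", max_new_tokens=20))  # forwarded to `generate` via the kwargs above
```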
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']


def pytest_collection_modifyitems(config, items):
    '''simple docstring'''
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit']):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    '''simple docstring'''
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    '''simple docstring'''
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(test_hf_datasets_cache))
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE', str(test_hf_metrics_cache))
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE', str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope='session')
def disable_tqdm_output():
    '''simple docstring'''
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    '''simple docstring'''
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS', False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    '''simple docstring'''
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING', True)
| 72 | 0 |
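The autouse fixtures above isolate every test run by monkeypatching `datasets.config` path constants to a temporary directory. The same isolation pattern applies to any library that reads a module-level setting; a minimal sketch with a hypothetical library and constant:

```python
import pytest

@pytest.fixture(autouse=True)
def isolated_cache(monkeypatch, tmp_path):
    # point the (hypothetical) library's cache at a per-test temp dir
    monkeypatch.setattr("mylib.config.CACHE_DIR", str(tmp_path / "cache"))
```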
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 0 |
def perfect_cube(n: int) -> bool:
    # round the floating-point cube root: 27 ** (1 / 3) is 3.0000000000000004, not exactly 3.0
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 0 | 1 |
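The fix above rounds the floating-point cube root before comparing, because values like `27 ** (1 / 3)` are not exact. For arbitrarily large integers, an exact float-free check via integer binary search is safer:

```python
def perfect_cube_exact(n: int) -> bool:
    """Exact perfect-cube test using only integer arithmetic."""
    n = abs(n)
    lo, hi = 0, n
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        lo, hi = (mid + 1, hi) if cube < n else (lo, mid - 1)
    return False
```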
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase__ : Tuple = 5
# Realm tok
UpperCAmelCase__ : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCAmelCase__ : str = os.path.join(self.tmpdirname , """realm_tokenizer""" )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
UpperCAmelCase__ : List[str] = os.path.join(self.tmpdirname , """realm_block_records""" )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) )
def _a (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = RealmConfig(num_block_records=self.num_block_records )
return config
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = np.array(
[
B"""This is the first record""",
B"""This is the second record""",
B"""This is the third record""",
B"""This is the fourth record""",
B"""This is the fifth record""",
B"""This is a longer longer longer record""",
] , dtype=_lowerCamelCase , )
return block_records
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.get_config()
UpperCAmelCase__ : Dict = self.get_dummy_retriever()
UpperCAmelCase__ : str = retriever.tokenizer
UpperCAmelCase__ : Dict = np.array([0, 3] , dtype="""long""" )
UpperCAmelCase__ : Tuple = tokenizer(["""Test question"""] ).input_ids
UpperCAmelCase__ : List[str] = tokenizer(
["""the fourth"""] , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ).input_ids
UpperCAmelCase__ : List[Any] = config.reader_seq_len
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = retriever(
_lowerCamelCase , _lowerCamelCase , answer_ids=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors="""np""" )
self.assertEqual(len(_lowerCamelCase ) , 2 )
self.assertEqual(len(_lowerCamelCase ) , 2 )
self.assertEqual(len(_lowerCamelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = self.get_config()
UpperCAmelCase__ : Any = self.get_dummy_retriever()
UpperCAmelCase__ : int = retriever.tokenizer
UpperCAmelCase__ : List[Any] = np.array([0, 3, 5] , dtype="""long""" )
UpperCAmelCase__ : List[str] = tokenizer(["""Test question"""] ).input_ids
UpperCAmelCase__ : List[str] = tokenizer(
["""the fourth""", """longer longer"""] , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ).input_ids
UpperCAmelCase__ : Dict = config.reader_seq_len
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = retriever(
_lowerCamelCase , _lowerCamelCase , answer_ids=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors="""np""" )
self.assertEqual([False, True, True] , _lowerCamelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _lowerCamelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
# Test local path
UpperCAmelCase__ : List[str] = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
self.assertEqual(retriever.block_records[0] , B"""This is the first record""" )
# Test mocked remote path
with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
UpperCAmelCase__ : List[Any] = os.path.join(
os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCAmelCase__ : List[str] = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )
self.assertEqual(retriever.block_records[0] , B"""This is the first record""" )
| 166 |
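The last test in the row above patches `hf_hub_download` so that the "remote" load of `google/realm-cc-news-pretrained-openqa` resolves to the locally saved block-records file. The mocking pattern in isolation, with a hypothetical patch target and path:

```python
from unittest.mock import patch

with patch("mypackage.retrieval.hf_hub_download") as mock_download:  # hypothetical target
    mock_download.return_value = "/tmp/realm_block_records/block_records.npy"
    # code under test that calls hf_hub_download(...) now receives the local path
```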
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'OwlViTImageProcessor'
SCREAMING_SNAKE_CASE = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__(self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _lowerCamelCase , )
UpperCAmelCase__ : Optional[int] = kwargs.pop("""feature_extractor""" )
UpperCAmelCase__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_lowerCamelCase , _lowerCamelCase )
def __call__(self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="max_length" , _lowerCamelCase="np" , **_lowerCamelCase ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(_lowerCamelCase , _lowerCamelCase ) or (isinstance(_lowerCamelCase , _lowerCamelCase ) and not isinstance(text[0] , _lowerCamelCase )):
UpperCAmelCase__ : Any = [self.tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )]
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(text[0] , _lowerCamelCase ):
UpperCAmelCase__ : Any = []
# Maximum number of queries across batch
UpperCAmelCase__ : int = max([len(_lowerCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_lowerCamelCase ) != max_num_queries:
UpperCAmelCase__ : Optional[int] = t + [""" """] * (max_num_queries - len(_lowerCamelCase ))
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
encodings.append(_lowerCamelCase )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
UpperCAmelCase__ : Optional[Any] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : Any = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCAmelCase__ : Any = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : List[str] = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCAmelCase__ : str = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
UpperCAmelCase__ : int = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCAmelCase__ : Any = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : Tuple = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
UpperCAmelCase__ : Dict = BatchEncoding()
UpperCAmelCase__ : int = input_ids
UpperCAmelCase__ : Optional[int] = attention_mask
if query_images is not None:
UpperCAmelCase__ : int = BatchEncoding()
UpperCAmelCase__ : Optional[int] = self.image_processor(
_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ).pixel_values
UpperCAmelCase__ : List[Any] = query_pixel_values
if images is not None:
UpperCAmelCase__ : Any = self.image_processor(_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
if text is not None and images is not None:
UpperCAmelCase__ : List[str] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCAmelCase__ : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCamelCase ) , tensor_type=_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@property
def _a (self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _lowerCamelCase , )
return self.image_processor_class
@property
def _a (self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _lowerCamelCase , )
return self.image_processor
| 166 | 1 |
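When the processor above receives nested text queries (one list of query strings per image), it first pads every sample's list with single-space strings up to the batch maximum, tokenizes each padded list, then concatenates the per-sample encodings along the batch axis. The padding step on its own:

```python
def pad_text_queries(batch_of_queries):
    # batch_of_queries: list of lists of strings, one inner list per image
    max_num_queries = max(len(queries) for queries in batch_of_queries)
    return [queries + [" "] * (max_num_queries - len(queries)) for queries in batch_of_queries]
```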
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    \'bleu\': bleu score,\n    \'precisions\': geometric mean of n-gram precisions,\n    \'brevity_penalty\': brevity penalty,\n    \'length_ratio\': ratio of lengths,\n    \'translation_length\': translation_length,\n    \'reference_length\': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample\n    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)\n    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric("bleu")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results["bleu"])\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        """simple docstring"""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
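For reference, the value returned by `compute_bleu` combines the geometric mean of the modified $n$-gram precisions $p_n$ (up to `max_order` $N$, with uniform weights $w_n = 1/N$) with a brevity penalty based on candidate length $c$ and reference length $r$:

$$\mathrm{BLEU} = \mathrm{BP} \cdot \exp\!\left(\sum_{n=1}^{N} w_n \log p_n\right), \qquad \mathrm{BP} = \begin{cases} 1 & \text{if } c > r \\ e^{1 - r/c} & \text{if } c \le r \end{cases}$$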
 | 25 |
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, 'r', encoding='utf-8') as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode('utf-8')
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}")

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value('zero_optimization.stage', -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(['cpu', 'nvme'])
            offload_devices = set(
                [
                    self.get_value('zero_optimization.offload_optimizer.device'),
                    self.get_value('zero_optimization.offload_param.device'),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split('.')
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split('.')
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, 'overflow')

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 77 | 0 |
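`find_config_node`/`get_value` above implement dotted-path lookup into the nested DeepSpeed JSON config (e.g. `"zero_optimization.offload_param.device"`). The core walk, as a standalone sketch:

```python
def get_by_dotted_key(config: dict, dotted_key: str, default=None):
    *parents, leaf = dotted_key.split(".")
    node = config
    for key in parents:
        node = node.get(key)
        if node is None:
            return default
    return node.get(leaf, default)

# get_by_dotted_key({"zero_optimization": {"stage": 3}}, "zero_optimization.stage")  -> 3
```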
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self , a , a=3 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ):
lowercase__ : List[Any] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Tuple = seq_length
lowercase__ : Any = is_training
lowercase__ : str = use_input_mask
lowercase__ : str = use_token_type_ids
lowercase__ : List[Any] = use_labels
lowercase__ : Union[str, Any] = vocab_size
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : List[Any] = num_attention_heads
lowercase__ : int = intermediate_size
lowercase__ : Tuple = hidden_act
lowercase__ : Any = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : Optional[int] = max_position_embeddings
lowercase__ : Optional[int] = type_vocab_size
lowercase__ : Optional[int] = type_sequence_label_size
lowercase__ : Optional[int] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Union[str, Any] = num_choices
lowercase__ : Any = scope
def snake_case_ ( self):
lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ : Union[str, Any] = None
if self.use_input_mask:
lowercase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ : List[Any] = None
lowercase__ : Optional[Any] = None
lowercase__ : Tuple = None
lowercase__ : List[str] = None
if self.use_labels:
lowercase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=a , )
def snake_case_ ( self , a , a , a , a , a , a , a):
lowercase__ : str = FalconModel(config=a)
model.to(a)
model.eval()
lowercase__ : Dict = model(a , attention_mask=a)
lowercase__ : List[str] = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case_ ( self , a , a , a , a , a , a , a , a , a , ):
lowercase__ : Optional[Any] = True
lowercase__ : Optional[int] = FalconModel(a)
model.to(a)
model.eval()
lowercase__ : int = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowercase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowercase__ : Any = model(a , attention_mask=a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case_ ( self , a , a , a , a , a , a , a , a , a , ):
lowercase__ : Tuple = FalconForCausalLM(config=a)
model.to(a)
model.eval()
lowercase__ : List[Any] = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case_ ( self , a , a , a , a , a , a , a , a , a , ):
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = FalconForCausalLM(config=a)
model.to(a)
model.eval()
# first forward pass
lowercase__ : Dict = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowercase__ : Any = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size)
lowercase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
lowercase__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1)
lowercase__ : Tuple = torch.cat([input_mask, next_mask] , dim=-1)
lowercase__ : Any = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowercase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowercase__ : Any = ids_tensor((1,) , output_from_past.shape[-1]).item()
lowercase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-3))
def snake_case_ ( self):
lowercase__ : Any = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : Dict = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ (__snake_case , __snake_case , __snake_case , unittest.TestCase ):
__lowerCamelCase : Optional[int] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCamelCase : int = (FalconForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : Optional[Any] = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : List[Any] = False
__lowerCamelCase : Union[str, Any] = False
def snake_case_ ( self):
lowercase__ : List[Any] = FalconModelTester(self)
lowercase__ : str = ConfigTester(self , config_class=a , hidden_size=37)
def snake_case_ ( self):
self.config_tester.run_common_tests()
def snake_case_ ( self):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def snake_case_ ( self):
lowercase__ , *lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowercase__ : int = alibi
self.model_tester.create_and_check_model(a , *a)
def snake_case_ ( self):
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = 3
lowercase__ : List[Any] = input_dict['input_ids']
lowercase__ : Dict = input_ids.ne(1).to(a)
lowercase__ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
lowercase__ : Optional[Any] = FalconForSequenceClassification(a)
model.to(a)
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def snake_case_ ( self):
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : str = 'single_label_classification'
lowercase__ : Optional[int] = input_dict['input_ids']
lowercase__ : List[Any] = input_ids.ne(1).to(a)
lowercase__ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
lowercase__ : List[str] = FalconForSequenceClassification(a)
model.to(a)
model.eval()
lowercase__ : int = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def snake_case_ ( self):
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = input_dict['input_ids']
lowercase__ : Union[str, Any] = FalconForCausalLM(a)
model.to(a)
model.eval()
lowercase__ : List[str] = model(a , use_cache=a)
lowercase__ : Dict = input_ids.shape[0]
lowercase__ : Optional[int] = model._convert_to_rw_cache(result.past_key_values)
lowercase__ : List[str] = model._convert_cache_to_standard_format(a , a)
for layer in range(len(a)):
for tensor_idx in range(2):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
def snake_case_ ( self):
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = 3
lowercase__ : Dict = 'multi_label_classification'
lowercase__ : Optional[Any] = input_dict['input_ids']
lowercase__ : List[str] = input_ids.ne(1).to(a)
lowercase__ : Dict = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
lowercase__ : List[Any] = FalconForSequenceClassification(a)
model.to(a)
model.eval()
lowercase__ : str = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def snake_case_ ( self):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(a , 'use_cache'):
return
lowercase__ : Union[str, Any] = model_class(a).to(a)
if "use_cache" not in inputs:
lowercase__ : Tuple = True
lowercase__ : Optional[int] = model(**a)
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowercase__ : int = (
getattr(a , 'decoder_layers' , a)
or getattr(a , 'num_decoder_layers' , a)
or config.num_hidden_layers
)
lowercase__ : List[str] = getattr(a , 'num_kv_heads' , config.num_attention_heads)
lowercase__ : str = getattr(a , 'd_model' , config.hidden_size)
lowercase__ : int = embed_dim // num_attention_heads
lowercase__ : Any = outputs['past_key_values']
self.assertEqual(len(a) , a)
lowercase__ , lowercase__ : Dict = inputs['input_ids'].shape
for i in range(a):
if config.new_decoder_architecture:
lowercase__ : int = config.num_attention_heads
elif config.multi_query:
lowercase__ : List[str] = 1
self.assertEqual(len(past_kv[0]) , 2) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class SCREAMING_SNAKE_CASE__ (unittest.TestCase ):
@slow
def snake_case_ ( self):
lowercase__ : int = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b')
lowercase__ : Union[str, Any] = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b')
model.eval()
model.to(a)
lowercase__ : Tuple = tokenizer('My favorite food is' , return_tensors='pt').to(a)
lowercase__ : str = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
lowercase__ : Union[str, Any] = model.generate(**a , do_sample=a , max_new_tokens=19)
lowercase__ : List[Any] = tokenizer.batch_decode(a)[0]
self.assertEqual(a , a)
@slow
def snake_case_ ( self):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained(a)
lowercase__ : Optional[Any] = FalconForCausalLM.from_pretrained(a)
model.eval()
model.to(a)
lowercase__ : Union[str, Any] = tokenizer('My favorite food is' , return_tensors='pt').to(a)
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**a , do_sample=a , max_new_tokens=4)
model.generate(**a , do_sample=a , max_new_tokens=4)
model.generate(**a , num_beams=2 , max_new_tokens=4)
@slow
def snake_case_ ( self):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowercase__ : Dict = AutoTokenizer.from_pretrained(a)
lowercase__ : Dict = FalconForCausalLM.from_pretrained(a)
model.eval()
model.to(device=a)
lowercase__ : str = tokenizer('My favorite food is' , return_tensors='pt').to(a)
# Test results are the same with and without cache
lowercase__ : Tuple = model.generate(**a , do_sample=a , max_new_tokens=20 , use_cache=a)
lowercase__ : List[str] = model.generate(**a , do_sample=a , max_new_tokens=20 , use_cache=a)
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 216 |
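The cache test in the row above leans on the standard transformer KV-cache layout: `past_key_values` is a tuple with one `(key, value)` pair per decoder layer, each tensor shaped `[batch, num_heads, seq_len, head_dim]` (Falcon's internal "RW" format merges batch and heads into a single dimension, hence the 3-D vs 4-D assertions). A shape sanity check in isolation:

```python
def check_kv_cache(past_key_values, batch, num_heads, seq_len, head_dim):
    expected = (batch, num_heads, seq_len, head_dim)
    for key, value in past_key_values:  # one (key, value) pair per decoder layer
        assert key.shape == expected and value.shape == expected
```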
def dodecahedron_surface_area(edge: float) -> float:
    '''simple docstring'''
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive number.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    '''simple docstring'''
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive number.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 | 1 |
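Both functions evaluate the closed-form regular-dodecahedron formulas in terms of the edge length $a$:

$$A = 3\sqrt{25 + 10\sqrt{5}}\,a^{2} \approx 20.6458\,a^{2}, \qquad V = \frac{15 + 7\sqrt{5}}{4}\,a^{3} \approx 7.6631\,a^{3}$$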
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode('utf-8').split()

joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 63 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig ):
"""simple docstring"""
    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
    def __init__( self , vocab_size=5_02_65 , d_model=10_24 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=40_96 , activation_function="gelu" , max_position_embeddings=5_12 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
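# Usage sketch (added for illustration; not in the original file):
#     config = TrOCRConfig(d_model=512, decoder_layers=6)
#     config.hidden_size  # -> 512, routed through attribute_map to d_model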
| 63 | 1 |
import mpmath # for roots of unity
import numpy as np
class __lowercase :
"""simple docstring"""
    def __init__( self , poly_a=None , poly_b=None ) -> None:
        # Input as list
        self.polyA = list(poly_a or [0] )[:]
        self.polyB = list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        self.product = self.__multiply()
    def __dft( self , which ) -> list:
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft ) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol )]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply( self ) -> list:
        dft_a = self.__dft("A" )
        dft_b = self.__dft("B" )
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol )]
            root = self.root ** (next_ncol // 2)
            current_root = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
            inverce_c = new_inverse_c
next_ncol *= 2
# Unpack
        inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
    def __str__( self ) -> str:
        a = "A = " + " + ".join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyA[: self.len_A] ) )
        b = "B = " + " + ".join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyB[: self.len_B] ) )
        c = "A*B = " + " + ".join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.product ) )
        return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
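    # Usage sketch (added for illustration; not in the original file). The class is
    # named `__lowercase` by this dump's obfuscation (upstream it is simply `FFT`).
    # Multiply (1 + 2x + 3x^2) by (4 + 5x) -> 4 + 13x + 22x^2 + 15x^3.
    fft = __lowercase(poly_a=[1, 2, 3], poly_b=[4, 5])
    print(fft.product)  # [(4+0j), (13+0j), (22+0j), (15+0j)]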
| 370 |
import functools
def mincost_tickets(days: list, costs: list) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("""The parameter days should be a list of integers""" )
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("""The parameter costs should be a list of three integers""" )
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("""All days elements should be greater than 0""" )
    if max(days) >= 366:
        raise ValueError("""All days elements should be less than 366""" )
    days_set = set(days)
    @functools.cache
    def dynamic_programming(index: int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) ,costs[1] + dynamic_programming(index + 7 ) ,costs[2] + dynamic_programming(index + 30 ) ,)
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
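    # Usage sketch (added for illustration; not in the original module): travel days
    # 1, 4, 6, 7, 8 and 20 with 1-/7-/30-day pass costs 2, 7, 15 -> minimum spend 11.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11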
| 176 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_altclip'''] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
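# Illustration (added; not part of the original __init__.py): with the _LazyModule
# pattern, importing the package stays cheap and the torch-backed classes listed in
# _import_structure are only imported on first attribute access, e.g.:
#     from transformers.models.altclip import AltCLIPProcessor  # light import
#     from transformers.models.altclip import AltCLIPModel      # pulls in torch code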
| 346 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__UpperCamelCase : List[str] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : str =TFAutoModel.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[Any] =AutoModel.from_pretrained(lowerCamelCase__ , from_tf=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__UpperCamelCase : Any =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[str] =TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =AutoModelForPreTraining.from_pretrained(lowerCamelCase__ , from_tf=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Tuple =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : str =TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : Tuple =TFAutoModelForCausalLM.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ , from_pt=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[int] =AutoModelForCausalLM.from_pretrained(lowerCamelCase__ , from_tf=lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : Union[str, Any] =AutoModelForCausalLM.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ , from_tf=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : int =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Dict =TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[Any] =AutoModelWithLMHead.from_pretrained(lowerCamelCase__ , from_tf=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : List[Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[str] =TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : Tuple =TFAutoModelForMaskedLM.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ , from_pt=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[str] =AutoModelForMaskedLM.from_pretrained(lowerCamelCase__ , from_tf=lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : Optional[Any] =AutoModelForMaskedLM.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ , from_tf=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Any =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[Any] =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : List[str] =TFAutoModelForSeqaSeqLM.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ , from_pt=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Dict =AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ , from_tf=lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : List[Any] =AutoModelForSeqaSeqLM.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ , from_tf=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__UpperCamelCase : str =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : str =AutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ , from_tf=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__UpperCamelCase : List[Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Tuple =TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =AutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ , from_tf=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
def __lowercase ( self ):
"""simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
| 71 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Any ):
'''simple docstring'''
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        gelu_10 = get_activation('''gelu_10''' )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu_10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
        with self.assertRaises(KeyError ):
            get_activation('''bogus''' )
        with self.assertRaises(KeyError ):
            get_activation(None )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
        acta = get_activation('''gelu''' )
        acta.a = 1
        actb = get_activation('''gelu''' )
        self.assertEqual(acta.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = actb.a
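# Illustration (added; not part of the original test file): '''gelu_10''' is a clipped
# GELU whose outputs saturate at 10.0 - exactly what the mask comparison above checks.
if __name__ == "__main__" and is_torch_available():
    sample = torch.tensor([-100.0, 0.0, 5.0, 100.0] )
    print(get_activation('''gelu_10''' )(sample ) )  # the 100.0 input is capped at 10.0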
| 350 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowercase_ ( self : Dict ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Dict ):
'''simple docstring'''
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def lowercase_ ( self : Any ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(vocab_keys ) , 1_002 )
def lowercase_ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_002 )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def lowercase_ ( self : str ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname_2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname_2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname_2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname_2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname_2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname_2 )
@cached_property
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def lowercase_ ( self : Any ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def lowercase_ ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 299 | 0 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0' )
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception('Years to repay must be an integer > 0' )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
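    # Usage sketch (added for illustration; not in the original module): a 25,000 loan
    # at 12% annual interest repaid monthly over 2 years.
    print(equated_monthly_installments(25000, 0.12, 2))  # ~1176.84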
| 99 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_12,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool( string ) -> bool:
'''simple docstring'''
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'could not parse string as bool {string}' )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
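    # Example invocation (added for illustration; the script name and file paths are
    # placeholders, not taken from the original repository):
    #   python convert_controlnet_checkpoint.py \
    #       --checkpoint_path control_sd15_canny.pth \
    #       --original_config_file cldm_v15.yaml \
    #       --dump_path ./controlnet-canny --device cpu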
| 176 | 0 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args :Namespace):
    return TrainCommand(args)
class __magic_name__ ( __UpperCAmelCase ):
@staticmethod
    def register_subcommand( parser : ArgumentParser ):
        '''simple docstring'''
        train_parser = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
        train_parser.add_argument(
            '''--train_data''' , type=str , required=True , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
        train_parser.add_argument(
            '''--column_label''' , type=int , default=0 , help='''Column of the dataset csv file with example labels.''' )
        train_parser.add_argument(
            '''--column_text''' , type=int , default=1 , help='''Column of the dataset csv file with example texts.''' )
        train_parser.add__argument(
            '''--column_id''' , type=int , default=2 , help='''Column of the dataset csv file with example ids.''' )
        train_parser.add_argument(
            '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
        train_parser.add_argument('''--validation_data''' , type=str , default='''''' , help='''path to validation dataset.''' )
        train_parser.add_argument(
            '''--validation_split''' , type=float , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
        train_parser.add_argument('''--output''' , type=str , default='''./''' , help='''path to save the trained model.''' )
        train_parser.add_argument(
            '''--task''' , type=str , default='''text_classification''' , help='''Task to train the model on.''' )
        train_parser.add_argument(
            '''--model''' , type=str , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
        train_parser.add_argument('''--train_batch_size''' , type=int , default=3_2 , help='''Batch size for training.''' )
        train_parser.add_argument('''--valid_batch_size''' , type=int , default=6_4 , help='''Batch size for validation.''' )
        train_parser.add_argument('''--learning_rate''' , type=float , default=3e-5 , help='''Learning rate.''' )
        train_parser.add_argument('''--adam_epsilon''' , type=float , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
        train_parser.set_defaults(func=train_command_factory )
    def __init__( self , args : Namespace ):
        '''simple docstring'''
        self.logger = logging.get_logger('''transformers-cli/training''' )
        self.framework = '''tf''' if is_tf_available() else '''torch'''
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"""Loading dataset from {args.train_data}""" )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ):
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
    def run_torch( self ):
'''simple docstring'''
raise NotImplementedError
    def run_tf( self ):
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
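# Example invocation (added for illustration; paths are placeholders):
#   transformers-cli train --task text_classification --model bert-base-uncased \
#       --train_data ./train.csv --column_label 0 --column_text 1 \
#       --validation_split 0.1 --output ./trained_model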
| 172 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self ):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation : "Conversation" ):
        '''simple docstring'''
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(''' ''' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = ''' '''.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
| 172 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args : Namespace ):
    return TrainCommand(args )
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser : ArgumentParser ) ->Tuple:
        train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.' )
        train_parser.add_argument(
            '--train_data', type=str, required=True, help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.', )
        train_parser.add_argument(
            '--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split', type=float, default=0.1, help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.', )
        train_parser.add_argument('--output', type=str, default='./', help='path to save the trained model.' )
        train_parser.add_argument(
            '--task', type=str, default='text_classification', help='Task to train the model on.' )
        train_parser.add_argument(
            '--model', type=str, default='bert-base-uncased', help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size', type=int, default=3_2, help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size', type=int, default=6_4, help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate', type=float, default=3e-5, help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=train_command_factory )
    def __init__( self, args : Namespace ) ->Union[str, Any]:
        self.logger = logging.get_logger('transformers-cli/training' )
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output, exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F'''Loading dataset from {args.train_data}''' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ) ->Dict:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
    def run_torch( self ) ->List[str]:
raise NotImplementedError
    def run_tf( self ) ->int:
self.pipeline.fit(
self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 277 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ :Optional[Any] = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :int = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 277 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """beit"""
    def __init__(self ,vocab_size=81_92 ,hidden_size=7_68 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=30_72 ,hidden_act="gelu" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,image_size=2_24 ,patch_size=16 ,num_channels=3 ,use_mask_token=False ,use_absolute_position_embeddings=False ,use_relative_position_bias=False ,use_shared_relative_position_bias=False ,layer_scale_init_value=0.1 ,drop_path_rate=0.1 ,use_mean_pooling=True ,out_indices=[3, 5, 7, 11] ,pool_scales=[1, 2, 3, 6] ,use_auxiliary_head=True ,auxiliary_loss_weight=0.4 ,auxiliary_channels=2_56 ,auxiliary_num_convs=1 ,auxiliary_concat_input=False ,semantic_loss_ignore_index=2_55 ,**kwargs ,) -> Dict:
"""simple docstring"""
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""")
@property
def lowerCAmelCase__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase__ (self ) -> float:
"""simple docstring"""
return 1e-4
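# Usage sketch (added for illustration; not part of the original file):
#     config = BeitConfig(image_size=384)
#     config.num_hidden_layers  # -> 12 (the default above)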
| 94 |
__snake_case : Any ='\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__snake_case : Tuple =[{'type': 'code', 'content': INSTALL_CONTENT}]
__snake_case : Tuple ={
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 94 | 1 |
'''simple docstring'''
import qiskit
def quantum_entanglement(qubits: int = 2 ) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , qubits ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(qubits ) ) )
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
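# Editor's sketch: with 2 qubits the circuit above prepares a Bell state, so
# only the all-zeros and all-ones bitstrings should appear, each in roughly
# half of the 1000 shots (exact counts vary per run, e.g. {'00': 497, '11': 503}).
counts = quantum_entanglement(2)
print(counts)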
| 185 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
A__ : List[str] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
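# Editor's sketch: the converter can also be driven programmatically instead
# of through argparse. The paths below are placeholders; WavLM checkpoints are
# distributed through the microsoft/unilm repository referenced above.
#
#   convert_wavlm_checkpoint(
#       checkpoint_path="./WavLM-Base.pt",          # original fairseq/unilm checkpoint
#       pytorch_dump_folder_path="./hf-wavlm-base",
#       config_path=None,                           # falls back to a default WavLMConfig
#   )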
| 185 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
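# Editor's sketch: the derived properties above, using the signature defaults
# (hidden_size=4544, num_attention_heads=71).
config = FalconConfig()
print(config.head_dim)   # 4544 // 71 == 64
print(config.rotary)     # True — rotary embeddings are used whenever alibi is off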
| 359 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
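# Editor's sketch, continuing from the test body above: mapping the logits to
# a human-readable label. RVL-CDIP has 16 document classes; id2label comes
# from the model config.
#
#   predicted_class_idx = logits.argmax(-1).item()
#   print(model.config.id2label[predicted_class_idx])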
| 200 | 0 |
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train new_tokenizer via Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train new_tokenizer via Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
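# Editor's sketch (requires the rjieba package): RoFormer's Chinese tokenizer
# runs jieba-style word segmentation before wordpiece, which is why
# multi-character words such as 服装 come back as single tokens above.
#
#   tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
#   print(tokenizer.tokenize("今天天气非常好"))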
| 42 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
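# Editor's sketch: the deprecated config above still instantiates like any
# other PretrainedConfig subclass.
config = RetriBertConfig(projection_dim=128)
print(config.model_type)       # "retribert"
print(config.share_encoders)   # True by default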
| 72 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level tokenizers,
        # so this is skipped here (mostly an issue of adding a space before the string).
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,
                )
                input_ids = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(input_ids))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(input_ids)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deprecated_fast_tokenizer(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)

        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
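# Editor's sketch for the toy vocab used in setUp above: GPT-2's byte-level
# BPE marks a leading space with "\u0120" (rendered as "Ġ"), so real
# vocabularies contain entries like "Ġlow". With the full pretrained merges:
#
#   tok = GPT2Tokenizer.from_pretrained("gpt2")
#   print(tok.tokenize("lower newer"))   # e.g. ['lower', 'Ġnewer']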
| 24 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
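# Editor's sketch: the minimal standalone form of the benchmark API exercised
# by the tests above.
#
#   args = TensorFlowBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = TensorFlowBenchmark(args).run()
#   print(results.time_inference_result)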
| 24 | 1 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
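# Editor's sketch: exercising the detection helper above with a fake
# environment. Even with both env vars set, it still returns False unless
# the `smdistributed` package is importable.
#
#   os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
#   os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
#   print(is_sagemaker_model_parallel_available())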
| 166 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
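# Editor's sketch of typical usage for the processor above;
# "google/owlvit-base-patch32" is the standard OWL-ViT checkpoint on the Hub,
# and the blank PIL image stands in for a real photo.
#
#   from PIL import Image
#   from transformers import OwlViTProcessor
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   image = Image.new("RGB", (768, 768))
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   print(inputs.keys())   # input_ids, attention_mask, pixel_values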
| 166 | 1 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    # Used to change the data of a particular node
    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This section of the test uses varying data types for input.
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
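# Editor's sketch: a quick demo of the list above.
#
#   ll = LinkedList()
#   for value in (3, 1, 4):
#       ll.insert_tail(value)
#   ll.insert_head(0)
#   print(ll)        # 0->3->1->4
#   ll.reverse()
#   print(ll)        # 4->1->3->0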
| 6 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : List[str] = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
a_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
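    # Editor's sketch of the Metropolis acceptance rule used above: a move
    # that worsens the score by 5 at temperature T is accepted with
    # probability e^(-5 / T), so high temperatures accept bad moves far
    # more often.
    for temp in (100, 10, 1):
        print(temp, math.e ** (-5 / temp))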
| 6 | 1 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def __UpperCamelCase ( lowerCAmelCase__ : [int] , lowerCAmelCase__ : [int] , lowerCAmelCase__ : [int] ):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    # grow the crop rectangle by `overlap` pixels on every side, clamped to the image
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    # prepend a slice of the (downscaled) original image to the tile so the
    # upscaler sees context from the surrounding region
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    # drop the context slice (now scaled 4x by the upscaler) that squeeze_tile added
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
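# Usage note (the name next_divisible is a reconstruction; example values are
# illustrative): the helper rounds n down to the nearest multiple of d,
# e.g. next_divisible(300, 128) == 256.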
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(
        self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs
    ):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # horizontal position (in tile coordinates) from which to take the context slice
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
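    # The tile flow above, in short: crop with a tile_border overlap, squeeze in
    # a strip of the downscaled full image for global context, run the x4
    # upscaler, drop the context strip again, then paste the result back with a
    # linear-ramp alpha mask so adjacent tiles cross-fade instead of seaming.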
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
    main()
| 216 |
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Escape-time iteration of z -> z^2 + c for c = x + y*i, returning the
    normalized step count in [0, 1]; 1 means the point never diverged and so
    belongs to the Mandelbrot set.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
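# Quick sanity checks (illustrative values, not from the original file):
#   get_distance(0, 0, 50) == 1.0   # the origin never diverges, so it is in the set
#   get_distance(1, 1, 50) == 0.0   # 1 + 1j escapes on the very first step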
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set (distance == 1), white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; otherwise map the distance to a hue on the HSV wheel."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
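# Example (illustrative, not from the original file): a point that escapes
# halfway through the iteration budget has distance 0.5, and
# colorsys.hsv_to_rgb(0.5, 1, 1) == (0.0, 1.0, 1.0), so it is drawn cyan:
#   get_color_coded_rgb(0.5) == (0, 255, 255)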
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set into a PIL image of the given size."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates;
            # deriving figure_height from figure_width keeps the pixels square
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 216 | 1 |
"""simple docstring"""
import math
import os
import sys
def lowercase ( lowerCAmelCase__ : Dict ) -> str:
__a = ''
try:
with open(_UpperCAmelCase , '''rb''' ) as binary_file:
__a = binary_file.read()
for dat in data:
__a = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Add the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
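# Why the rekeying step above (explanatory note, not from the original file):
# ids are written as bin(index) with no fixed width, so whenever index reaches
# a power of two the newest id grows by one bit; prepending "0" to every stored
# id keeps all ids in the lexicon the same width and therefore unambiguous.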
def compress_data(data_bits: str) -> str:
    """Compress the given bit string using the Lempel-Ziv algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    # flush any leftover bits by padding until a known key is formed
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix the compressed bits with the source file's length so it is self-delimiting."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string to_write to file_path, padding the final byte with a '1' marker."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
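# Padding example (illustrative values): a final chunk "101" becomes "10110000",
# i.e. a "1" marker plus zero fill, so a decoder can strip everything from the
# last "1" onward; a bit string that already fills its last byte gets a whole
# "10000000" marker byte appended instead.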
def compress(source_path: str, destination_path: str) -> None:
    """Read source_path, compress its contents, and write the result to destination_path."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 350 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
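# How this pattern works (explanatory note, not part of the original module):
# at import time the module object is swapped in sys.modules for a _LazyModule
# that resolves names from _import_structure on first attribute access, so the
# heavy torch / TensorFlow imports are deferred until a class is actually used.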
| 11 | 0 |