| code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available

if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase : Dict = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # NOTE: the expected-encoding dict literal above keeps its original,
        # machine-generated variable name (`lowerCamelCase`) and belongs
        # inside this method body.
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_separate_tokenizers(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
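# Usage sketch (illustrative, not part of the test file): MarianTokenizer
# handles separate source/target vocabularies via `text_target`, e.g.
#   tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   batch = tok(["I am a small frog"], text_target=["Ich bin ein kleiner Frosch"], return_tensors="pt")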
| 48 |
def interpolation_search(sorted_collection: list, item: int):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
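# Worked example (illustrative): for collection [10, 30, 40, 45, 50, 66, 77, 93]
# and item 67, the first probe is
#   point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4,
# i.e. the search jumps straight to where a uniformly distributed collection
# would place 67, instead of starting at the midpoint as binary search does.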
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant; the collection must be ascending sorted."""
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    # The collection is defined unconditionally so the search below always has
    # input (the original guarded it behind the debug flag, which raised a
    # NameError whenever debug != 1).
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 177 | 0 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
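# End-user sketch mirroring the integration test above (illustrative):
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#   images = pipe("a shark", guidance_scale=15.0, frame_size=64, output_type="np").images[0]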
| 61 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
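# Minimal usage sketch (illustrative, not part of the module): the defaults
# above give a ViT-Base-like backbone with 100 detection tokens.
#   from transformers import YolosConfig
#   config = YolosConfig()
#   assert config.num_detection_tokens == 100 and config.hidden_size == 768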
| 61 | 1 |
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length

        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
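# Usage note (illustrative): the same criteria compose outside the tests, e.g.
#   model.generate(input_ids, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)]))
# halts generation as soon as any criterion in the list fires.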
| 343 |
def valid_coloring(neighbours: list, colored_vertices: list, color: int) -> bool:
    """Check whether `color` can be given to the current vertex."""
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list, max_colors: int, colored_vertices: list, index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list, max_colors: int) -> list:
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
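# Minimal usage sketch (illustrative, not part of the original module): color a
# triangle graph, given as an adjacency matrix, with 3 available colors.
if __name__ == "__main__":
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    print(color(triangle, 3))  # -> [0, 1, 2]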
| 343 | 1 |
import requests
from bs4 import BeautifulSoup  # fixed: the module is `bs4`, not `bsa`


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count reported by Google Scholar for the given lookup query."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
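# Caveat (editorial note): the CSS classes `gs_ri`/`gs_fl` and the position of
# the "Cited by" anchor depend on Google Scholar's current markup, and
# automated requests may be rate-limited, so treat this scraper as best-effort.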
| 134 |
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    # rename the LM head weight to the key expected by transformers
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 134 | 1 |
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Find a shortest path (fewest edges) between `start` and `goal`."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
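# Both functions above run in O(V + E): each vertex and edge is processed at
# most once, and on an unweighted graph the first time BFS reaches the goal is
# along a fewest-edges path.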
| 336 |
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Error in the example pointed to by `example_no` in the given data set."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
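# The loop in run_gradient_descent below implements the batch update rule for a
# linear hypothesis h(x) = theta_0 + theta_1*x_1 + ... + theta_n*x_n:
#   theta_i <- theta_i - LEARNING_RATE * (1/m) * sum_j (h(x_j) - y_j) * x_j_i
# where x_j_0 is implicitly 1 for the bias term (the `index == -1` branch above).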
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 336 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    # Field names below are restored from `datasets`' DownloadConfig, whose
    # field types and defaults match the stripped originals one-for-one.
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        # deep-copy every field so the copy can be mutated independently
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
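# Usage sketch (illustrative): mutate a deep copy without touching the original.
#   base = DownloadConfig(max_retries=3)
#   forced = base.copy()
#   forced.force_download = True
#   assert base.force_download is False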
| 216 |
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
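# Usage sketch (illustrative; assumes an active SparkSession):
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()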
| 216 | 1 |
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
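# Note on the training loop below: for the binary cross-entropy cost
#   J(theta) = mean(-y*log(h) - (1 - y)*log(1 - h)),  h = sigmoid(x @ theta),
# the derivative simplifies to dJ/dtheta = x.T @ (h - y) / y.size, which is
# exactly the `gradient` computed in `logistic_reg`.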
# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # probability predicted by the fitted logistic regression model
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 18 |
import collections
import gzip
import os
import urllib

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
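# Worked example (illustrative): IDX headers are big-endian uint32, so the four
# bytes b"\x00\x00\x08\x03" decode to 0x0803 = 2051 -- the image-file magic
# number checked in _extract_images below.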
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # the assignment target was lost in the dump; restored from the upstream
    # TensorFlow source
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
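# Worked example (illustrative): _dense_to_one_hot(numpy.array([1, 3]), 5) gives
#   [[0., 1., 0., 0., 0.],
#    [0., 0., 0., 1., 0.]]
# since the flat indices are the row offsets [0, 5] plus the labels [1, 3].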
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it is already present."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
| 62 | 0 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 365 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants


FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                # assignment target restored from the upstream OpenFold source
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                # assignment target restored from the upstream OpenFold source
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask given the amino-acid sequence."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
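# Typical flow (illustrative): wrap model outputs and render them as PDB text.
#   prot = from_prediction(features, result)
#   pdb_string = to_pdb(prot)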
| 10 | 0 |
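The ATOM records built above live or die by PDB's fixed-width columns. A standalone sketch with invented coordinates; only the column widths come from the PDB format, and a fully padded record is exactly 80 characters wide:

record_type = "ATOM"
atom_index = 1
name = " CA"                      # names shorter than 4 characters get a leading space
alt_loc = ""
res_name = "GLY"
chain_tag = "A"
res_index = 1
insertion_code = ""
x, y, z = 11.104, 6.134, -6.504   # made-up coordinates
occupancy, b_factor = 1.00, 0.00
element, charge = "C", ""
atom_line = (
    f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
    f"{res_name:>3} {chain_tag:>1}"
    f"{res_index:>4}{insertion_code:>1}   "
    f"{x:>8.3f}{y:>8.3f}{z:>8.3f}"
    f"{occupancy:>6.2f}{b_factor:>6.2f}          "
    f"{element:>2}{charge:>2}"
)
assert len(atom_line) == 80
print(atom_line)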
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def A_ ( compression_format , is_archive , bza_file , gz_file , lza_file , seven_zip_file , tar_file , text_file , xz_file , zip_file , zstd_file , tmp_path ) -> Optional[Any]:
    '''simple docstring'''
    input_paths_and_base_extractors = {
        '''7z''': (seven_zip_file, SevenZipExtractor),
        '''bz2''': (bza_file, BzipaExtractor),
        '''gzip''': (gz_file, GzipExtractor),
        '''lz4''': (lza_file, LzaExtractor),
        '''tar''': (tar_file, TarExtractor),
        '''xz''': (xz_file, XzExtractor),
        '''zip''': (zip_file, ZipExtractor),
        '''zstd''': (zstd_file, ZstdExtractor),
    }
    input_path , base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    assert base_extractor.is_extractable(input_path )
    output_path = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
    base_extractor.extract(input_path , output_path )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='''utf-8''' )
    else:
        extracted_file_content = output_path.read_text(encoding='''utf-8''' )
    expected_file_content = text_file.read_text(encoding='''utf-8''' )
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def A_ ( compression_format , is_archive , bza_file , gz_file , lza_file , seven_zip_file , tar_file , text_file , xz_file , zip_file , zstd_file , tmp_path ) -> List[Any]:
    '''simple docstring'''
    input_paths = {
        '''7z''': seven_zip_file,
        '''bz2''': bza_file,
        '''gzip''': gz_file,
        '''lz4''': lza_file,
        '''tar''': tar_file,
        '''xz''': xz_file,
        '''zip''': zip_file,
        '''zstd''': zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    extractor_format = Extractor.infer_extractor_format(input_path )
    assert extractor_format is not None
    output_path = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
    Extractor.extract(input_path , output_path , extractor_format )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='''utf-8''' )
    else:
        extracted_file_content = output_path.read_text(encoding='''utf-8''' )
    expected_file_content = text_file.read_text(encoding='''utf-8''' )
    assert extracted_file_content == expected_file_content
@pytest.fixture
def A_ ( tmp_path , text_file ) -> List[str]:
    '''simple docstring'''
    import tarfile
    directory = tmp_path / '''data_dot_dot'''
    directory.mkdir()
    path = directory / '''tar_file_with_dot_dot.tar'''
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(text_file , arcname=os.path.join('''..''' , text_file.name ) )
    return path
@pytest.fixture
def A_ ( tmp_path ) -> Dict:
    '''simple docstring'''
    import tarfile
    directory = tmp_path / '''data_sym_link'''
    directory.mkdir()
    path = directory / '''tar_file_with_sym_link.tar'''
    os.symlink('''..''' , directory / '''subdir''' , target_is_directory=True )
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def A_ ( insecure_tar_file , error_log , tar_file_with_dot_dot , tar_file_with_sym_link , tmp_path , caplog ) -> Dict:
    '''simple docstring'''
    insecure_tar_files = {
        '''tar_file_with_dot_dot''': tar_file_with_dot_dot,
        '''tar_file_with_sym_link''': tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / '''extracted'''
    TarExtractor.extract(insecure_tar_file_path , output_path )
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def A_ ( tmpdir ) -> Union[str, Any]:
    '''simple docstring'''
    not_a_zip_file = tmpdir / '''not_a_zip_file'''
    # From: https://github.com/python/cpython/pull/5053
    data = (
        B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
        B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
        B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
        B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
    )
    with not_a_zip_file.open('''wb''' ) as f:
        f.write(data )
    assert zipfile.is_zipfile(str(not_a_zip_file ) ) # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file ) # but we're right
| 328 |
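The last test above crafts a PNG that zipfile.is_zipfile misclassifies, because is_zipfile scans the whole file for an end-of-central-directory marker rather than checking the leading bytes. A sketch of the stricter magic-number check (the helper name and the eight-byte read are choices made here, not datasets API):

from typing import Optional

MAGIC_NUMBERS = {
    b"PK\x03\x04": "zip",              # a real zip starts with a local file header
    b"\x1f\x8b": "gzip",
    b"BZh": "bz2",
    b"\xfd7zXZ\x00": "xz",
    b"\x28\xb5\x2f\xfd": "zstd",
    b"7z\xbc\xaf\x27\x1c": "7z",
    b"\x04\x22\x4d\x18": "lz4",
}

def sniff_format(path: str) -> Optional[str]:
    with open(path, "rb") as f:
        header = f.read(8)             # the longest magic above is 6 bytes
    for magic, fmt in MAGIC_NUMBERS.items():
        if header.startswith(magic):
            return fmt
    return None

Applied to the crafted file, sniff_format returns None (the payload starts with the PNG signature \x89PNG) even though zipfile.is_zipfile says True.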
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file( file , class_name , test_name , correct_line , done_test ):
    _id = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file , """r""" ) as f:
        lines = f.readlines()
    class_regex = f"""class {class_name}("""
    test_regex = f"""{4 * " "}def {test_name}("""
    line_begin_regex = f"""{8 * " "}{correct_line.split()[0]}"""
    another_line_begin_regex = f"""{16 * " "}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * " "}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , """w""" ) as f:
        for line in new_lines:
            f.write(line )
def main( correct , fail=None ):
    if fail is not None:
        with open(fail , """r""" ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , """r""" ) as f:
        correct_lines = f.readlines()
    done_test = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(""";""" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_test )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
args = parser.parse_args()
main(args.correct_filename, args.fail_filename) | 341 | 0 |
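The script above expects two small text files whose shape is easy to miss from the parsing code alone. A sketch with invented paths and names:

# Each line of --correct_filename is "file;class;test;corrected line".
correct_lines = (
    "tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;"
    "expected_slice = torch.tensor([0.1, 0.2, 0.3])\n"
)
# --fail_filename (optional) limits rewriting to failed tests,
# one pytest-style "file::class::test" identifier per line.
failures = "tests/models/bert/test_modeling_bert.py::BertModelTest::test_inference\n"
with open("correct.txt", "w") as f:
    f.write(correct_lines)
with open("failures.txt", "w") as f:
    f.write(failures)
# python overwrite_script.py --correct_filename correct.txt --fail_filename failures.txt
# (script filename invented for the example)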
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available( ) -> bool:
    '''simple docstring'''
    smp_options = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get("""sagemaker_mpi_enabled""" , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = field(
default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , UpperCamelCase , )
@cached_property
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
__UpperCAmelCase : Optional[Any] = torch.device("""cpu""" )
__UpperCAmelCase : Union[str, Any] = 0
elif is_sagemaker_model_parallel_available():
__UpperCAmelCase : Tuple = smp.local_rank()
__UpperCAmelCase : Optional[Any] = torch.device("""cuda""" , UpperCamelCase )
__UpperCAmelCase : str = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
__UpperCAmelCase : Dict = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
__UpperCAmelCase : Dict = torch.device("""cuda""" , self.local_rank )
__UpperCAmelCase : Optional[int] = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__UpperCAmelCase : Optional[Any] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__UpperCAmelCase : Dict = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
__UpperCAmelCase : str = torch.device("""cuda""" , self.local_rank )
__UpperCAmelCase : List[str] = 1
if device.type == "cuda":
torch.cuda.set_device(UpperCamelCase )
return device
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return False
| 320 |
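The availability check above keys off two environment variables that the SageMaker launcher normally sets. A sketch reproducing its decision by hand (the values are invented):

import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2, "microbatches": 4})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
mpi_options = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
# The real helper additionally requires the smdistributed module to be importable.
print("partitions" in smp_options and mpi_options.get("sagemaker_mpi_enabled", False))  # True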
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 320 | 1 |
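_LazyModule defers every heavy import above until an attribute is first touched. A reduced sketch of the same idea using only the standard library, via a module-level __getattr__ (PEP 562); the module and class names are placeholders, not transformers API, and the sketch assumes the file lives inside a package:

import importlib

_import_structure = {"heavy_submodule": ["HeavyClass"]}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")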
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinv2 import (
    SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
    Swinv2ForImageClassification,
    Swinv2ForMaskedImageModeling,
    Swinv2Model,
    Swinv2PreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 65 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_snake_case : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 123 | 0 |
from math import loga
def UpperCamelCase ( _A ):
"""simple docstring"""
if a < 0:
raise ValueError("""Input value must be a positive integer""" )
    elif not isinstance(_A, int ):
raise TypeError("""Input value must be a 'int' type""" )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
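The expression a & -a isolates the lowest set bit: in two's complement, -a flips every bit above the rightmost 1 and keeps that 1 in place, so the AND leaves exactly one bit standing and log2 of the result is that bit's index. An integer-exact cross-check of the same quantity:

for a in (1, 2, 36, 40, 58):
    lowest = a & -a
    index = lowest.bit_length() - 1                         # no floating-point log2 needed
    assert index == len(bin(a)) - len(bin(a).rstrip("0"))   # count of trailing zeros
    print(f"{a} = {bin(a)}, lowest set bit at index {index}")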
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , num_labels=3 , scope=None , encoder_stride=2 , ) -> Optional[Any]:
__magic_name__ : Optional[Any] = parent
__magic_name__ : List[str] = batch_size
__magic_name__ : Union[str, Any] = image_size
__magic_name__ : Optional[Any] = patch_size
__magic_name__ : Union[str, Any] = num_channels
__magic_name__ : Union[str, Any] = is_training
__magic_name__ : Union[str, Any] = use_labels
__magic_name__ : Tuple = hidden_size
__magic_name__ : List[str] = num_hidden_layers
__magic_name__ : Optional[Any] = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Union[str, Any] = hidden_act
__magic_name__ : str = hidden_dropout_prob
__magic_name__ : List[str] = attention_probs_dropout_prob
__magic_name__ : Tuple = type_sequence_label_size
__magic_name__ : Any = initializer_range
__magic_name__ : str = scope
__magic_name__ : str = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__magic_name__ : Dict = (image_size // patch_size) ** 2
__magic_name__ : int = num_patches + 2
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> List[str]:
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ) -> Any:
        model = TFDeiTModel(config=config )
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ) -> Optional[Any]:
        model = TFDeiTForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Tuple:
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ) -> str:
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def __magic_name__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def __magic_name__ ( self ) -> Union[str, Any]:
pass
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ ,__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__magic_name__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , tf.keras.layers.Dense ) )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(lowerCAmelCase__ )
__magic_name__ : List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : int = [*signature.parameters.keys()]
__magic_name__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Any:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> str:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]
        return inputs_dict
@slow
def __magic_name__ ( self ) -> Dict:
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[int]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ) -> List[Any]:
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 138 | 0 |
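The sequence length the tester above asserts follows from the patch grid plus DeiT's two special tokens, the [CLS] token and the distillation token. With the defaults of the integration-test checkpoint:

image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2   # 14 * 14 = 196
seq_length = num_patches + 2                    # + [CLS] + distillation token
print(num_patches, seq_length)                  # 196 198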
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = '''sshleifer/bart-tiny-random'''
TINY_T5 = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class __snake_case ( unittest.TestCase):
@cached_property
    def teacher_config( self ):
        """simple docstring"""
        return AutoConfig.from_pretrained(TINY_BART )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
| 72 |
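A sketch of the layer-copying idea these tests exercise: initialise a small student by spreading copies of teacher layers evenly across the stack. This is a generic nn.ModuleList illustration with an invented helper, not the make_student implementation:

import torch.nn as nn

def pick_layers_to_copy(n_student: int, n_teacher: int) -> list:
    # spread the copied layers evenly across the teacher stack
    step = n_teacher / n_student
    return [round(i * step) for i in range(n_student)]

teacher_layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(12)])
ids = pick_layers_to_copy(6, 12)                       # [0, 2, 4, 6, 8, 10]
student_layers = nn.ModuleList([teacher_layers[i] for i in ids])
print(ids, len(student_layers))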
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __snake_case ( ProcessorMixin ):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor( self ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 72 | 1 |
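A usage sketch for the processor above; the checkpoint name is illustrative. Audio goes through the feature extractor, text through the tokenizer, and passing both attaches the token ids under "labels":

import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
speech = np.zeros(16_000, dtype=np.float32)   # one second of silence at 16 kHz
inputs = processor(audio=speech, sampling_rate=16_000, text="hello world")
print(sorted(inputs.keys()))                  # feature-extractor outputs plus "labels"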
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
        config.update(**kwargs )
return config
def _lowercase ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , )
def _lowercase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowercase )
    def test_variance( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowercase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowercase ):
if i == len(_lowercase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowercase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowercase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowercase )
with self.assertRaises(_lowercase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowercase , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=_lowercase )
| 371 |
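The variance values asserted in the variance test above follow directly from the scheduler config: with the linear schedule beta_t = linspace(1e-4, 0.02, 1000), the "fixed_small" variance is beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t). A sketch of that posterior formula (this mirrors the DDPM paper's definition, not diffusers internals):

import torch

betas = torch.linspace(1e-4, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def fixed_small_variance(t: int) -> torch.Tensor:
    prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return betas[t] * (1.0 - prev) / (1.0 - alphas_cumprod[t])

print(fixed_small_variance(0).item())    # 0.0
print(fixed_small_variance(487).item())  # ~0.00979
print(fixed_small_variance(999).item())  # ~0.02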
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError ( Exception ):
    '''simple docstring'''
    pass
class Node :
'''simple docstring'''
    def __init__( self , data ):
        """simple docstring"""
        self.data = data
        self.next_node = None
    def __iter__( self ):
        """simple docstring"""
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
@property
def _lowercase ( self ):
"""simple docstring"""
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop) # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop) # True
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop) # False
    root_node = Node(1)
| 229 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
    )
default_cache_path = os.path.join(torch_cache_home, """transformers""")
CLOUDFRONT_DISTRIB_PREFIX = """https://cdn.huggingface.co"""
S3_BUCKET_PREFIX = """https://s3.amazonaws.com/models.huggingface.co/bert"""
PATH = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
CONFIG = os.path.join(PATH, """config.yaml""")
ATTRIBUTES = os.path.join(PATH, """attributes.txt""")
OBJECTS = os.path.join(PATH, """objects.txt""")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = """pytorch_model.bin"""
CONFIG_NAME = """config.yaml"""
def A_ ( objs=OBJECTS , attrs=ATTRIBUTES ) -> Union[str, Any]:
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(',' )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(',' )[0].lower().strip() )
    return vg_classes, vg_attrs
def A_ ( A__ ) -> Union[str, Any]:
a__ : Optional[Any] = OrderedDict()
with open(A__ , 'rb' ) as f:
a__ : List[str] = pkl.load(A__ )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
a__ : Tuple = ckp.pop(A__ )
if isinstance(A__ , np.ndarray ):
a__ : str = torch.tensor(A__ )
else:
assert isinstance(A__ , torch.tensor ), type(A__ )
a__ : Optional[int] = v
return r
class A__ :
"""simple docstring"""
__A : Optional[Any] = {}
def __init__( self , lowercase , lowercase = "root" , lowercase=0) -> Union[str, Any]:
'''simple docstring'''
a__ : List[str] = name
a__ : Optional[Any] = level
a__ : Tuple = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
a__ : Dict = copy.deepcopy(lowercase)
a__ : Any = copy.deepcopy(lowercase)
if isinstance(lowercase , lowercase):
a__ : Tuple = Config(lowercase , name=lowercase , level=level + 1)
a__ : Dict = v
setattr(self , lowercase , lowercase)
a__ : List[Any] = d
def __repr__( self) -> Optional[int]:
'''simple docstring'''
return str(list((self._pointer.keys())))
def __setattr__( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__ : str = val
a__ : Tuple = val
a__ : int = key.split('.')
a__ : Tuple = len(lowercase) - 1
a__ : int = self._pointer
if len(lowercase) > 1:
for i, l in enumerate(lowercase):
if hasattr(self , lowercase) and isinstance(getattr(self , lowercase) , lowercase):
setattr(getattr(self , lowercase) , '.'.join(levels[i:]) , lowercase)
if l == last_level:
a__ : List[str] = val
else:
a__ : Tuple = pointer[l]
def __lowercase ( self) -> Tuple:
'''simple docstring'''
return self._pointer
def __lowercase ( self , lowercase , lowercase) -> str:
'''simple docstring'''
with open(F'{file_name}' , 'w') as stream:
dump(lowercase , lowercase)
def __lowercase ( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
with open(F'{file_name}' , 'w') as stream:
json.dump(lowercase , lowercase)
@staticmethod
def __lowercase ( lowercase) -> Optional[int]:
'''simple docstring'''
with open(lowercase) as stream:
a__ : Tuple = load(lowercase , Loader=lowercase)
return data
def __str__( self) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = ' '
if self._name != "root":
a__ : Any = F'{t * (self._level-1)}{self._name}:\n'
else:
a__ : Dict = ''
a__ : Tuple = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(lowercase , lowercase):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(lowercase).__name__})\n'
a__ : Tuple = level
return r[:-1]
@classmethod
def __lowercase ( cls , lowercase , **lowercase) -> Tuple:
'''simple docstring'''
a__ , a__ : Optional[Any] = cls.get_config_dict(lowercase , **lowercase)
return cls(lowercase)
@classmethod
def __lowercase ( cls , lowercase , **lowercase) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = kwargs.pop('cache_dir' , lowercase)
a__ : Tuple = kwargs.pop('force_download' , lowercase)
a__ : str = kwargs.pop('resume_download' , lowercase)
a__ : Tuple = kwargs.pop('proxies' , lowercase)
a__ : List[str] = kwargs.pop('local_files_only' , lowercase)
if os.path.isdir(lowercase):
a__ : int = os.path.join(lowercase , lowercase)
elif os.path.isfile(lowercase) or is_remote_url(lowercase):
a__ : Union[str, Any] = pretrained_model_name_or_path
else:
a__ : Tuple = hf_bucket_url(lowercase , filename=lowercase , use_cdn=lowercase)
try:
# Load from URL or cache if already cached
a__ : Optional[Any] = cached_path(
lowercase , cache_dir=lowercase , force_download=lowercase , proxies=lowercase , resume_download=lowercase , local_files_only=lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
a__ : Optional[int] = Config.load_yaml(lowercase)
except EnvironmentError:
a__ : List[str] = 'Can\'t load config for'
raise EnvironmentError(lowercase)
if resolved_config_file == config_file:
print('loading configuration file from path')
else:
print('loading configuration file cache')
return Config.load_yaml(lowercase), kwargs
def A_ ( A__ ) -> Union[str, Any]:
a__ : str = torch.load('dump.pt' , map_location=in_tensor.device )
a__ : Optional[Any] = in_tensor.numpy()
a__ : List[Any] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(A__ , A__ , rtol=0.01 , atol=0.1 ), (
F'{sum([1 for x in np.isclose(A__ , A__ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def A_ ( A__ ) -> str:
a__ : Dict = urlparse(A__ )
return parsed.scheme in ("http", "https")
def A_ ( A__ , A__ , A__=True ) -> str:
a__ : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
a__ : Any = '/' not in model_id
if legacy_format:
return F'{endpoint}/{model_id}-{filename}'
else:
return F'{endpoint}/{model_id}/{filename}'
def A_ ( A__ , A__ , A__=None , A__=0 , A__=None , ) -> Union[str, Any]:
a__ : int = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(A__ , A__ ):
ua += "; " + "; ".join('{}/{}'.format(A__ , A__ ) for k, v in user_agent.items() )
elif isinstance(A__ , A__ ):
ua += "; " + user_agent
a__ : Dict = {'user-agent': ua}
if resume_size > 0:
a__ : Optional[Any] = 'bytes=%d-' % (resume_size,)
a__ : Any = requests.get(A__ , stream=A__ , proxies=A__ , headers=A__ )
if response.status_code == 416: # Range not satisfiable
return
a__ : List[Any] = response.headers.get('Content-Length' )
a__ : Optional[Any] = resume_size + int(A__ ) if content_length is not None else None
a__ : Any = tqdm(
unit='B' , unit_scale=A__ , total=A__ , initial=A__ , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(A__ ) )
temp_file.write(A__ )
progress.close()
def A_ ( A__ , A__=None , A__=False , A__=None , A__=10 , A__=False , A__=None , A__=False , ) -> List[str]:
if cache_dir is None:
a__ : str = TRANSFORMERS_CACHE
if isinstance(A__ , A__ ):
a__ : Dict = str(A__ )
os.makedirs(A__ , exist_ok=A__ )
a__ : Union[str, Any] = None
if not local_files_only:
try:
a__ : Union[str, Any] = requests.head(A__ , allow_redirects=A__ , proxies=A__ , timeout=A__ )
if response.status_code == 200:
a__ : Union[str, Any] = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
a__ : Optional[Any] = url_to_filename(A__ , A__ )
# get cache path to put the file
a__ : str = os.path.join(A__ , A__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(A__ ):
return cache_path
else:
a__ : List[Any] = [
file
for file in fnmatch.filter(os.listdir(A__ ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(A__ ) > 0:
return os.path.join(A__ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(A__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
a__ : str = cache_path + '.lock'
with FileLock(A__ ):
# If the download just completed while the lock was activated.
if os.path.exists(A__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
a__ : Any = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(A__ , 'a+b' ) as f:
yield f
a__ : Optional[Any] = _resumable_file_manager
if os.path.exists(A__ ):
a__ : str = os.stat(A__ ).st_size
else:
a__ : str = 0
else:
a__ : List[Any] = partial(tempfile.NamedTemporaryFile , dir=A__ , delete=A__ )
a__ : Optional[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s' , A__ , temp_file.name , )
http_get(
A__ , A__ , proxies=A__ , resume_size=A__ , user_agent=A__ , )
os.replace(temp_file.name , A__ )
a__ : Dict = {'url': url, 'etag': etag}
a__ : str = cache_path + '.json'
with open(A__ , 'w' ) as meta_file:
json.dump(A__ , A__ )
return cache_path
def A_ ( A__ , A__=None ) -> Union[str, Any]:
a__ : int = url.encode('utf-8' )
a__ : str = shaaaa(A__ )
a__ : Union[str, Any] = url_hash.hexdigest()
if etag:
a__ : Optional[Any] = etag.encode('utf-8' )
a__ : Tuple = shaaaa(A__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def A_ ( A__ , A__=None , A__=False , A__=None , A__=False , A__=None , A__=False , A__=False , A__=False , ) -> Any:
if cache_dir is None:
a__ : Tuple = TRANSFORMERS_CACHE
if isinstance(A__ , A__ ):
a__ : Optional[Any] = str(A__ )
if isinstance(A__ , A__ ):
a__ : Optional[int] = str(A__ )
if is_remote_url(A__ ):
# URL, so get it from the cache (downloading if necessary)
a__ : Any = get_from_cache(
A__ , cache_dir=A__ , force_download=A__ , proxies=A__ , resume_download=A__ , user_agent=A__ , local_files_only=A__ , )
elif os.path.exists(A__ ):
# File, and it exists.
a__ : Tuple = url_or_filename
elif urlparse(A__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(A__ ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(A__ ) )
if extract_compressed_file:
if not is_zipfile(A__ ) and not tarfile.is_tarfile(A__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
a__ , a__ : List[Any] = os.path.split(A__ )
a__ : Dict = output_file.replace('.' , '-' ) + '-extracted'
a__ : Union[str, Any] = os.path.join(A__ , A__ )
if os.path.isdir(A__ ) and os.listdir(A__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
a__ : List[Any] = output_path + '.lock'
with FileLock(A__ ):
shutil.rmtree(A__ , ignore_errors=A__ )
os.makedirs(A__ )
if is_zipfile(A__ ):
with ZipFile(A__ , 'r' ) as zip_file:
zip_file.extractall(A__ )
zip_file.close()
elif tarfile.is_tarfile(A__ ):
a__ : List[Any] = tarfile.open(A__ )
tar_file.extractall(A__ )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(A__ ) )
return output_path_extracted
return output_path
def A_ ( A__ , A__="," ) -> Union[str, Any]:
assert isinstance(A__ , A__ )
if os.path.isfile(A__ ):
with open(A__ ) as f:
a__ : List[Any] = eval(f.read() )
else:
a__ : Optional[Any] = requests.get(A__ )
try:
            a__ : str = req.json()
except Exception:
a__ : str = req.content.decode()
assert data is not None, "could not connect"
try:
a__ : Tuple = eval(A__ )
except Exception:
a__ : List[Any] = data.split('\n' )
req.close()
return data
def A_ ( A__ ) -> List[str]:
a__ : str = requests.get(A__ )
a__ : str = np.array(Image.open(BytesIO(response.content ) ) )
return img
def A_ ( A__ ) -> Any:
a__ : List[str] = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(A__ )
with open(A__ , 'rb' ) as stream:
a__ : Tuple = pkl.load(A__ )
a__ : Optional[Any] = weights.pop('model' )
a__ : int = {}
for k, v in model.items():
a__ : List[str] = torch.from_numpy(A__ )
if "running_var" in k:
a__ : str = torch.tensor([0] )
a__ : List[Any] = k.replace('running_var' , 'num_batches_tracked' )
a__ : int = zero
return new
def A_ ( ) -> List[Any]:
print(F'{os.path.abspath(os.path.join(A__ , os.pardir ) )}/demo.ipynb' )
def A_ ( A__ , A__="RGB" ) -> List[Any]:
assert isinstance(A__ , A__ )
if os.path.isfile(A__ ):
a__ : Any = cva.imread(A__ )
else:
a__ : Dict = get_image_from_url(A__ )
assert img is not None, F'could not connect to: {im}'
a__ : Dict = cva.cvtColor(A__ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
a__ : Optional[int] = img[:, :, ::-1]
return img
def A_ ( A__ , A__=1 ) -> Tuple:
return (images[i : i + batch] for i in range(0 , len(A__ ) , A__ ))
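# Minimal illustration of the batching generator above (the obfuscated helper is
# assumed callable as `chunk(images, batch)` here; the name is hypothetical):
#   >>> list(chunk([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]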
| 99 | """simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
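# For reference, the metric below implements the standard definition
#   PPL(X) = exp( -(1/t) * sum_{i=1..t} log p(x_i | x_{<i}) )
# i.e. torch.exp of the mean masked cross-entropy per text, with
# `mean_perplexity` averaging those per-text scores over the input list.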
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : List[str]=None ):
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
UpperCAmelCase__ = 'cuda'
else:
UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = model.to(lowerCamelCase__ )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowerCamelCase__ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ = model.config.max_length - 1
else:
UpperCAmelCase__ = model.config.max_length
UpperCAmelCase__ = tokenizer(
lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
UpperCAmelCase__ = encodings['input_ids']
UpperCAmelCase__ = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ = []
UpperCAmelCase__ = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
UpperCAmelCase__ = min(start_index + batch_size ,len(lowerCamelCase__ ) )
UpperCAmelCase__ = encoded_texts[start_index:end_index]
UpperCAmelCase__ = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
UpperCAmelCase__ = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() ,dtype=torch.int64 ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
UpperCAmelCase__ = encoded_batch
with torch.no_grad():
UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ = labels[..., 1:].contiguous()
UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
            UpperCAmelCase__ = torch.exp(
(loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
| 98 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def lowercase__ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(__snake_case )
UpperCAmelCase_ : Union[str, Any] = load_dataset('glue' , 'mrpc' )
def tokenize_function(__snake_case : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Dict = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : str = datasets.map(
__snake_case , batched=__snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=__snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__snake_case : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__snake_case , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(__snake_case , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
UpperCAmelCase_ : Any = DataLoader(
tokenized_datasets['train'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
UpperCAmelCase_ : Optional[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
def lowercase__ ( __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : List[Any] = config['lr']
UpperCAmelCase_ : Dict = int(config['num_epochs'] )
UpperCAmelCase_ : List[Any] = int(config['seed'] )
UpperCAmelCase_ : List[Any] = int(config['batch_size'] )
UpperCAmelCase_ : Optional[Any] = args.model_name_or_path
set_seed(__snake_case )
UpperCAmelCase_ : Optional[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case )
# Instantiate optimizer
UpperCAmelCase_ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters() , lr=__snake_case )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ : str = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : Union[str, Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps
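    # Worked example of the step arithmetic above (hypothetical numbers): with
    # 1000 batches per epoch, 3 epochs and gradient_accumulation_steps=4, this
    # yields (1000 * 3) // 4 = 750 optimizer/scheduler steps.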
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , )
else:
UpperCAmelCase_ : Union[str, Any] = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ : Any = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCAmelCase_ : Union[str, Any] = 0
# Now we train the model
UpperCAmelCase_ : str = evaluate.load('glue' , 'mrpc' )
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : List[Any] = {}
for epoch in range(__snake_case , __snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
UpperCAmelCase_ : Union[str, Any] = model(**__snake_case )
UpperCAmelCase_ : int = outputs.loss
UpperCAmelCase_ : Any = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCAmelCase_ : Optional[int] = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Any = model(**__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_ : Optional[Any] = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__snake_case ) - 1:
UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : List[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
UpperCAmelCase_ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __snake_case )
UpperCAmelCase_ : Optional[Any] = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
UpperCAmelCase_ : str = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(__snake_case , __snake_case )
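# Sketch of the duplicate trimming in the eval loop above (hypothetical numbers):
# with 2 processes and 5 eval samples, the gathered final batch contains a padded
# duplicate, so on the last step only len(eval_dataloader.dataset) - samples_seen
# of the gathered predictions/references are kept before updating the metric.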
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=__snake_case , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=__snake_case , )
parser.add_argument(
'--output_dir' , type=__snake_case , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
        '--performance_lower_bound' , type=__snake_case , default=__snake_case , help='Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=__snake_case , default=3 , help='Number of train epochs.' , )
UpperCAmelCase_ : Dict = parser.parse_args()
UpperCAmelCase_ : str = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
| 352 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
@register_to_config
def __init__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None ) -> int:
super().__init__()
UpperCAmelCase_ : str = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCAmelCase_ : Optional[Any] = torch.zeros(_UpperCamelCase , _UpperCamelCase )
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = torch.nn.Parameter(_UpperCamelCase )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : VQModel
_snake_case : CLIPTextModel
_snake_case : CLIPTokenizer
_snake_case : TransformeraDModel
_snake_case : LearnedClassifierFreeSamplingEmbeddings
_snake_case : VQDiffusionScheduler
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> List[Any]:
super().__init__()
self.register_modules(
vqvae=_UpperCamelCase , transformer=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , scheduler=_UpperCamelCase , learned_classifier_free_sampling_embeddings=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = len(_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else 1
# get prompt text embeddings
UpperCAmelCase_ : str = self.tokenizer(
_UpperCamelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCAmelCase_ : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase_ : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase_ : str = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCAmelCase_ : str = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCAmelCase_ : Dict = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_UpperCamelCase )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase_ : Dict = prompt_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCAmelCase_ : List[str] = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCAmelCase_ : List[str] = negative_prompt_embeds.unsqueeze(0 ).repeat(_UpperCamelCase , 1 , 1 )
else:
UpperCAmelCase_ : List[Any] = [''] * batch_size
UpperCAmelCase_ : List[Any] = text_input_ids.shape[-1]
UpperCAmelCase_ : Dict = self.tokenizer(
_UpperCamelCase , padding='max_length' , max_length=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='pt' , )
UpperCAmelCase_ : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCAmelCase_ : Dict = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_UpperCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ : List[Any] = negative_prompt_embeds.shape[1]
UpperCAmelCase_ : Dict = negative_prompt_embeds.repeat(1 , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Union[str, Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
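    # Layout note: under classifier-free guidance the returned tensor stacks
    # [negative_prompt_embeds; prompt_embeds] along the batch dimension, so a
    # single transformer forward pass scores both branches (they are split
    # back apart via .chunk(2) in __call__ below).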
@torch.no_grad()
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 5.0 , _UpperCamelCase = 1.0 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = 1
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = len(_UpperCamelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Union[str, Any] = batch_size * num_images_per_prompt
UpperCAmelCase_ : Optional[int] = guidance_scale > 1.0
UpperCAmelCase_ : Any = self._encode_prompt(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCamelCase , _UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(_UpperCamelCase )}." )
# get the initial completely masked latents unless the user supplied it
UpperCAmelCase_ : Optional[int] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCAmelCase_ : Tuple = self.transformer.num_vector_embeds - 1
UpperCAmelCase_ : List[Any] = torch.full(_UpperCamelCase , _UpperCamelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
UpperCAmelCase_ : Any = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_UpperCamelCase , device=self.device )
UpperCAmelCase_ : List[str] = self.scheduler.timesteps.to(self.device )
UpperCAmelCase_ : Union[str, Any] = latents
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the sample if we are doing classifier free guidance
UpperCAmelCase_ : Union[str, Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCAmelCase_ : Dict = self.transformer(_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , timestep=_UpperCamelCase ).sample
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_output.chunk(2 )
UpperCAmelCase_ : Optional[int] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_UpperCamelCase , dim=1 , keepdim=_UpperCamelCase )
UpperCAmelCase_ : str = self.truncate(_UpperCamelCase , _UpperCamelCase )
# remove `log(0)`'s (`-inf`s)
UpperCAmelCase_ : Optional[int] = model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : Union[str, Any] = self.scheduler.step(_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , generator=_UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : str = self.vqvae.config.vq_embed_dim
UpperCAmelCase_ : Optional[int] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCAmelCase_ : int = self.vqvae.quantize.get_codebook_entry(_UpperCamelCase , shape=_UpperCamelCase )
UpperCAmelCase_ : Dict = self.vqvae.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase ).sample
UpperCAmelCase_ : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : int = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> torch.FloatTensor:
UpperCAmelCase_ , UpperCAmelCase_ : int = torch.sort(_UpperCamelCase , 1 , descending=_UpperCamelCase )
UpperCAmelCase_ : Dict = torch.exp(_UpperCamelCase )
UpperCAmelCase_ : int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCAmelCase_ : Tuple = torch.full_like(keep_mask[:, 0:1, :] , _UpperCamelCase )
UpperCAmelCase_ : List[str] = torch.cat((all_true, keep_mask) , dim=1 )
UpperCAmelCase_ : int = keep_mask[:, :-1, :]
UpperCAmelCase_ : Any = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCAmelCase_ : str = log_p_x_0.clone()
UpperCAmelCase_ : Any = -torch.inf # -inf = log(0)
return rv
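    # Worked example of the truncation above (hypothetical values): for sorted
    # probabilities [0.5, 0.3, 0.2] the cumulative sums are [0.5, 0.8, 1.0];
    # with truncation_rate=0.7 the shifted mask (the largest entry is always
    # kept) keeps 0.5 and 0.3 and sets the 0.2 entry's log-prob to -inf, i.e. log(0).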
| 145 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : str = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''table-transformer'''
lowerCAmelCase = ['''past_key_values''']
lowerCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=3 , _UpperCAmelCase=100 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=8 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=8 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase="sine" , _UpperCAmelCase="resnet50" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , **_UpperCAmelCase , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__A : str = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Any = backbone_config.get('model_type')
__A : List[str] = CONFIG_MAPPING[backbone_model_type]
__A : Optional[Any] = config_class.from_dict(_UpperCAmelCase)
# set timm attributes to None
__A ,__A ,__A : str = None, None, None
__A : Tuple = use_timm_backbone
__A : str = backbone_config
__A : Union[str, Any] = num_channels
__A : Optional[Any] = num_queries
__A : List[Any] = d_model
__A : Optional[int] = encoder_ffn_dim
__A : Optional[int] = encoder_layers
__A : List[Any] = encoder_attention_heads
__A : Dict = decoder_ffn_dim
__A : int = decoder_layers
__A : Tuple = decoder_attention_heads
__A : int = dropout
__A : List[Any] = attention_dropout
__A : Any = activation_dropout
__A : str = activation_function
__A : str = init_std
__A : str = init_xavier_std
__A : List[Any] = encoder_layerdrop
__A : List[str] = decoder_layerdrop
__A : Tuple = encoder_layers
__A : List[Any] = auxiliary_loss
__A : Tuple = position_embedding_type
__A : List[Any] = backbone
__A : Dict = use_pretrained_backbone
__A : Union[str, Any] = dilation
# Hungarian matcher
__A : Union[str, Any] = class_cost
__A : Optional[int] = bbox_cost
__A : Tuple = giou_cost
# Loss coefficients
__A : Dict = mask_loss_coefficient
__A : List[str] = dice_loss_coefficient
__A : int = bbox_loss_coefficient
__A : Union[str, Any] = giou_loss_coefficient
__A : int = eos_coefficient
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return 1e-5
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return 12 | 190 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : str = {}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''llama'''
lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=3_2000 , _UpperCAmelCase=4096 , _UpperCAmelCase=1_1008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Union[str, Any] = vocab_size
__A : Union[str, Any] = max_position_embeddings
__A : Any = hidden_size
__A : Optional[Any] = intermediate_size
__A : str = num_hidden_layers
__A : Optional[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__A : List[Any] = num_attention_heads
__A : int = num_key_value_heads
__A : List[Any] = hidden_act
__A : Union[str, Any] = initializer_range
__A : List[Any] = rms_norm_eps
__A : Any = pretraining_tp
__A : Optional[Any] = use_cache
__A : Dict = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if self.rope_scaling is None:
return
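        # A valid value looks like (illustrative): {"type": "linear", "factor": 2.0}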
if not isinstance(self.rope_scaling , _UpperCAmelCase) or len(self.rope_scaling) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F'got {self.rope_scaling}')
__A : Optional[Any] = self.rope_scaling.get('type' , _UpperCAmelCase)
__A : Tuple = self.rope_scaling.get('factor' , _UpperCAmelCase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase , _UpperCAmelCase) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}') | 190 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCAmelCase (lowercase__ ):
"""simple docstring"""
_UpperCAmelCase :List[Any] = '''vivit'''
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=32 , _UpperCAmelCase=[2, 16, 16] , _UpperCAmelCase=3 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu_fast" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-0_6 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
lowercase__: Any = hidden_size
lowercase__: Tuple = num_hidden_layers
lowercase__: Tuple = num_attention_heads
lowercase__: Tuple = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: int = hidden_dropout_prob
lowercase__: Any = attention_probs_dropout_prob
lowercase__: Union[str, Any] = initializer_range
lowercase__: Any = layer_norm_eps
lowercase__: Dict = image_size
lowercase__: Optional[int] = num_frames
lowercase__: str = tubelet_size
lowercase__: Dict = num_channels
lowercase__: Optional[Any] = qkv_bias
        super().__init__(**_UpperCAmelCase )
| 354 | """simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 5_0 ) -> int:
lowercase__: str = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
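# Sketch of the recurrence above (a Project Euler 116-style count): fixing the
# first coloured tile of length t at some start position splits the row, so the
# count accumulates ways(remaining_length)[t] + 1, the +1 covering the
# arrangement with no further tile of that colour after the first one.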
if __name__ == "__main__":
print(f'''{solution() = }''')
| 2 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase :str = logging.get_logger(__name__)
lowerCAmelCase :List[str] = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = """gpt_neo"""
A_ : Optional[Any] = ["""past_key_values"""]
A_ : str = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : int , _A : int=50257 , _A : List[str]=2048 , _A : Dict=2048 , _A : int=24 , _A : str=[[["global", "local"], 12]] , _A : Tuple=16 , _A : Dict=None , _A : List[Any]=256 , _A : str="gelu_new" , _A : List[str]=0.0 , _A : str=0.0 , _A : Optional[int]=0.0 , _A : Optional[int]=0.1 , _A : int=1E-5 , _A : Optional[int]=0.02 , _A : int=True , _A : Optional[int]=50256 , _A : Any=50256 , **_A : Union[str, Any] , ) -> Any:
__magic_name__ : List[str] = vocab_size
__magic_name__ : List[str] = max_position_embeddings
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Any = num_layers
__magic_name__ : Tuple = num_heads
__magic_name__ : Union[str, Any] = intermediate_size
__magic_name__ : Dict = window_size
__magic_name__ : Union[str, Any] = activation_function
__magic_name__ : Optional[Any] = resid_dropout
__magic_name__ : Tuple = embed_dropout
__magic_name__ : List[str] = attention_dropout
__magic_name__ : Optional[int] = classifier_dropout
__magic_name__ : Dict = layer_norm_epsilon
__magic_name__ : int = initializer_range
__magic_name__ : Optional[Any] = use_cache
__magic_name__ : Dict = bos_token_id
__magic_name__ : Tuple = eos_token_id
__magic_name__ : str = attention_types
__magic_name__ : Optional[int] = self.expand_attention_types_params(_A )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
@staticmethod
def __lowerCAmelCase ( _A : Union[str, Any] ) -> List[Any]:
__magic_name__ : Optional[int] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : List[Any] ):
"""simple docstring"""
import torch
__magic_name__ : Any = input.size()
__magic_name__ : Optional[int] = len(lowerCAmelCase )
__magic_name__ : Tuple = shape[dimension]
__magic_name__ : Union[str, Any] = torch.arange(0 , lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Dict = torch.div(sizedim - size , lowerCAmelCase , rounding_mode='floor' ) + 1
__magic_name__ : int = torch.arange(lowerCAmelCase ) + low_indices[:min_length][:, None]
__magic_name__ : str = [slice(lowerCAmelCase )] * rank
__magic_name__ : Tuple = indices
__magic_name__ : str = input[s]
__magic_name__ : Optional[int] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCAmelCase )
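# Shape sketch for the sliding-window helper above (hypothetical sizes): for an
# input of shape (2, 8) with dimension=1, size=4, step=2 there are 3 full
# windows, so the result after the final permute has shape (2, 3, 4).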
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
import torch
__magic_name__ : Optional[int] = torch.arange(1 , lowerCAmelCase )
__magic_name__ : List[str] = torch.remainder(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Optional[Any] = remainders == 0
__magic_name__ : List[Any] = candidates[divisor_indices]
__magic_name__ : Dict = torch.max(lowerCAmelCase )
return largest_divisor, torch.div(lowerCAmelCase , lowerCAmelCase , rounding_mode='floor' )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
__magic_name__ : Union[str, Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
__magic_name__ : Any = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__magic_name__ : Optional[int] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __lowerCAmelCase ( self : Dict ) -> int:
return self._config.num_heads
def __lowerCAmelCase ( self : Dict , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ) -> Mapping[str, Any]:
__magic_name__ : Any = super(_A , self ).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
# We need to order the input in the way they appears in the forward()
__magic_name__ : List[Any] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__magic_name__ , __magic_name__ : Tuple = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__magic_name__ : List[str] = seqlen + 2
__magic_name__ : Any = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__magic_name__ : str = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
__magic_name__ : List[Any] = common_inputs['attention_mask']
if self.use_past:
__magic_name__ : List[str] = ordered_inputs['attention_mask'].dtype
__magic_name__ : Dict = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 )
return ordered_inputs
@property
def __lowerCAmelCase ( self : Optional[int] ) -> int:
return 13 | 331 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Dict = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ : int = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
SCREAMING_SNAKE_CASE_ : Any = BlipProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self , **lowerCAmelCase__ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).tokenizer
def UpperCamelCase__ ( self , **lowerCAmelCase__ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).image_processor
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        SCREAMING_SNAKE_CASE_ : Dict = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ : int = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Tuple = image_processor(lowerCAmelCase__ , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Tuple = processor(images=lowerCAmelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
SCREAMING_SNAKE_CASE_ : List[str] = processor(text=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = 'lower newer'
SCREAMING_SNAKE_CASE_ : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : List[str] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 162 |
from __future__ import annotations
class __lowercase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ = 0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = key
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(lowerCAmelCase__ ) ^ key ) for ch in content]
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(lowerCAmelCase__ ) ^ key ) for ch in content]
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = 0 ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
SCREAMING_SNAKE_CASE_ : Dict = ''
for ch in content:
ans += chr(ord(lowerCAmelCase__ ) ^ key )
return ans
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = 0 ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
SCREAMING_SNAKE_CASE_ : str = ''
for ch in content:
ans += chr(ord(lowerCAmelCase__ ) ^ key )
return ans
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = 0 ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
try:
with open(lowerCAmelCase__ ) as fin, open('encrypt.out' , 'w+' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCAmelCase__ , lowerCAmelCase__ ) )
except OSError:
return False
return True
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
try:
with open(lowerCAmelCase__ ) as fin, open('decrypt.out' , 'w+' ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCAmelCase__ , lowerCAmelCase__ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 162 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def _A ( _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
__UpperCamelCase = tau * frequency / samplerate
__UpperCamelCase = sin(_lowercase )
__UpperCamelCase = cos(_lowercase )
__UpperCamelCase = _sin / (2 * q_factor)
__UpperCamelCase = (1 - _cos) / 2
__UpperCamelCase = 1 - _cos
__UpperCamelCase = 1 + alpha
__UpperCamelCase = -2 * _cos
__UpperCamelCase = 1 - alpha
__UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
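# The coefficients above follow the RBJ Audio EQ Cookbook low-pass biquad
# (written with the conventional names that the digit-mangled locals collapsed):
#   b0 = b2 = (1 - cos w0) / 2,  b1 = 1 - cos w0
#   a0 = 1 + alpha,  a1 = -2 cos w0,  a2 = 1 - alpha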
def _A ( _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
__UpperCamelCase = tau * frequency / samplerate
__UpperCamelCase = sin(_lowercase )
__UpperCamelCase = cos(_lowercase )
__UpperCamelCase = _sin / (2 * q_factor)
__UpperCamelCase = (1 + _cos) / 2
__UpperCamelCase = -1 - _cos
__UpperCamelCase = 1 + alpha
__UpperCamelCase = -2 * _cos
__UpperCamelCase = 1 - alpha
__UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _A ( _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
__UpperCamelCase = tau * frequency / samplerate
__UpperCamelCase = sin(_lowercase )
__UpperCamelCase = cos(_lowercase )
__UpperCamelCase = _sin / (2 * q_factor)
__UpperCamelCase = _sin / 2
__UpperCamelCase = 0
__UpperCamelCase = -ba
__UpperCamelCase = 1 + alpha
__UpperCamelCase = -2 * _cos
__UpperCamelCase = 1 - alpha
__UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _A ( _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
__UpperCamelCase = tau * frequency / samplerate
__UpperCamelCase = sin(_lowercase )
__UpperCamelCase = cos(_lowercase )
__UpperCamelCase = _sin / (2 * q_factor)
__UpperCamelCase = 1 - alpha
__UpperCamelCase = -2 * _cos
__UpperCamelCase = 1 + alpha
__UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def _A ( _lowercase , _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
__UpperCamelCase = tau * frequency / samplerate
__UpperCamelCase = sin(_lowercase )
__UpperCamelCase = cos(_lowercase )
__UpperCamelCase = _sin / (2 * q_factor)
__UpperCamelCase = 10 ** (gain_db / 40)
__UpperCamelCase = 1 + alpha * big_a
__UpperCamelCase = -2 * _cos
__UpperCamelCase = 1 - alpha * big_a
__UpperCamelCase = 1 + alpha / big_a
__UpperCamelCase = -2 * _cos
__UpperCamelCase = 1 - alpha / big_a
__UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _A ( _lowercase , _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
__UpperCamelCase = tau * frequency / samplerate
__UpperCamelCase = sin(_lowercase )
__UpperCamelCase = cos(_lowercase )
__UpperCamelCase = _sin / (2 * q_factor)
__UpperCamelCase = 10 ** (gain_db / 40)
__UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos
__UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos
__UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos
__UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos
__UpperCamelCase = 2 * sqrt(_lowercase ) * alpha
__UpperCamelCase = big_a * (pmc + aaa)
__UpperCamelCase = 2 * big_a * mpc
__UpperCamelCase = big_a * (pmc - aaa)
__UpperCamelCase = ppmc + aaa
__UpperCamelCase = -2 * pmpc
__UpperCamelCase = ppmc - aaa
__UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _A ( _lowercase , _lowercase , _lowercase , _lowercase = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
__UpperCamelCase = tau * frequency / samplerate
__UpperCamelCase = sin(_lowercase )
__UpperCamelCase = cos(_lowercase )
__UpperCamelCase = _sin / (2 * q_factor)
__UpperCamelCase = 10 ** (gain_db / 40)
__UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos
__UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos
__UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos
__UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos
__UpperCamelCase = 2 * sqrt(_lowercase ) * alpha
__UpperCamelCase = big_a * (ppmc + aaa)
__UpperCamelCase = -2 * big_a * pmpc
__UpperCamelCase = big_a * (ppmc - aaa)
__UpperCamelCase = pmc + aaa
__UpperCamelCase = 2 * mpc
__UpperCamelCase = pmc - aaa
__UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 310 |
import os
def _A ( ) -> Tuple:
"""simple docstring"""
with open(os.path.dirname(_lowercase ) + '/p022_names.txt' ) as file:
__UpperCamelCase = str(file.readlines()[0] )
__UpperCamelCase = names.replace('"' , '' ).split(',' )
names.sort()
__UpperCamelCase = 0
__UpperCamelCase = 0
for i, name in enumerate(_lowercase ):
for letter in name:
name_score += ord(_lowercase ) - 64
total_score += (i + 1) * name_score
__UpperCamelCase = 0
return total_score
if __name__ == "__main__":
print(solution())
| 310 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : str = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
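
# Hedged usage sketch: instantiating the config directly with a couple of
# non-default adapter settings (all argument names as defined above):
#
#   config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#   print(config.adapter_reduction_factor)  # -> 2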
| 191 |
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary string representation (function name
    reconstructed; the original identifier was obfuscated).

    >>> decimal_to_binary(2)
    '0b10'
    >>> decimal_to_binary(7)
    '0b111'
    >>> decimal_to_binary(-5)
    '-0b101'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 191 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 348 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 81 | 0 |
class SubArray:
    def __init__(self, arr):
        # we need a list, not a string, so split the comma-separated input
        self.array = arr.split(",")

    def solve_sub_array(self):
        # Kadane-style DP: sum_value[i] is the best sum of a subarray ending at i,
        # rear[i] is the best sum seen anywhere in array[0..i]
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
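
# Worked example (hedged, traced by hand against the code above): for the
# input string "1,2,-3,4" the DP tables evolve as sum_value = [1, 3, 0, 4]
# and rear = [1, 3, 3, 4], so SubArray("1,2,-3,4").solve_sub_array() returns 4.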
| 125 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    # this function has to be in the manager under this name so that testing works
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    # this function has to be in the manager under this name so that testing works
    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    # this function has to be in the manager under this name so that testing works
    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    # this function has to be in the manager under this name so that testing works
    def extract(self, path, *args, **kwargs):
        return path

    # this function has to be in the manager under this name so that testing works
    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
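
# Hedged usage sketch: inside the datasets test suite this manager stands in
# for a real DownloadManager. Names match the class above; the dataset name
# and URL are illustrative only:
#
#   dl_manager = MockDownloadManager("squad", config=None, version="1.0.0", use_local_dummy_data=True)
#   paths = dl_manager.download_and_extract({"train": "https://example.com/train.json"})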
| 125 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
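
# Hedged CLI sketch: the launcher is invoked with the training script and its
# arguments trailing the options (script and flag names are illustrative):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...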
| 141 |
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
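
# Hedged CLI sketch (the script file name is whatever this file is saved as;
# the paths are illustrative):
#
#   python conversion_ldm_uncond.py \
#       --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm_pipeline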
| 141 | 1 |
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 362 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
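
# Hedged usage sketch: round-tripping a sentence through the fast tokenizer
# (checkpoint name taken from the pretrained map above):
#
#   tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   batch = tokenizer(["Summarize this document."], padding=True, return_tensors="pt")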
| 332 | 0 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return the grayscale version of an RGB image using the standard luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a binary mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
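
# Hedged usage sketch: the same pipeline works with any binary structuring
# element, e.g. a full 3x3 square instead of the cross used above:
#
#   square = np.ones((3, 3), dtype=int)
#   dilated = dilation(gray_to_binary(rgb_to_gray(lena)), square)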
| 214 |
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 214 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : int = """ClapFeatureExtractor"""
SCREAMING_SNAKE_CASE_ : List[str] = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple)-> Optional[int]:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__)
def __call__( self : int , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , **UpperCamelCase__ : List[str])-> Any:
'''simple docstring'''
__lowerCAmelCase: Dict = kwargs.pop("sampling_rate" , UpperCamelCase__)
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none.")
if text is not None:
__lowerCAmelCase: Any = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__)
if audios is not None:
__lowerCAmelCase: Optional[int] = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__)
if text is not None and audios is not None:
__lowerCAmelCase: int = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__) , tensor_type=UpperCamelCase__)
def lowercase_ ( self : Union[str, Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Tuple)-> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : str , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int])-> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__)
@property
def lowercase_ ( self : Any)-> Any:
'''simple docstring'''
__lowerCAmelCase: str = self.tokenizer.model_input_names
__lowerCAmelCase: Any = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
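
# Hedged usage sketch: pairing text with raw audio arrays (argument names as
# defined in __call__ above; the checkpoint name and waveform are illustrative):
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48000, return_tensors="pt")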
| 108 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1_0**-1_0 ) -> float:
__lowerCAmelCase: Union[str, Any] = a
while True:
__lowerCAmelCase: Optional[int] = Decimal(__SCREAMING_SNAKE_CASE ) - (
Decimal(eval(__SCREAMING_SNAKE_CASE ) ) / Decimal(eval(str(diff(__SCREAMING_SNAKE_CASE ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__SCREAMING_SNAKE_CASE ) ) < precision: # noqa: S307
return float(__SCREAMING_SNAKE_CASE )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 108 | 1 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__snake_case : Optional[int] = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
__snake_case : List[Any] = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def _lowercase ( __snake_case ,__snake_case=False ) -> Any:
__lowerCAmelCase : Union[str, Any] = create_model(
"HTSAT-tiny" ,"roberta" ,lowerCAmelCase_ ,precision="fp32" ,device="cuda:0" if torch.cuda.is_available() else "cpu" ,enable_fusion=lowerCAmelCase_ ,fusion_type="aff_2d" if enable_fusion else None ,)
return model, model_cfg
def _lowercase ( __snake_case ) -> int:
__lowerCAmelCase : Optional[int] = {}
__lowerCAmelCase : Tuple = r'.*sequential.(\d+).*'
__lowerCAmelCase : int = r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__lowerCAmelCase : Union[str, Any] = key.replace(lowerCAmelCase_ ,lowerCAmelCase_ )
if re.match(lowerCAmelCase_ ,lowerCAmelCase_ ):
# replace sequential layers with list
__lowerCAmelCase : List[str] = re.match(lowerCAmelCase_ ,lowerCAmelCase_ ).group(1 )
__lowerCAmelCase : Optional[Any] = key.replace(F"""sequential.{sequential_layer}.""" ,F"""layers.{int(lowerCAmelCase_ )//3}.linear.""" )
elif re.match(lowerCAmelCase_ ,lowerCAmelCase_ ):
__lowerCAmelCase : str = int(re.match(lowerCAmelCase_ ,lowerCAmelCase_ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__lowerCAmelCase : Optional[Any] = 1 if projecton_layer == 0 else 2
__lowerCAmelCase : int = key.replace(F"""_projection.{projecton_layer}.""" ,F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
__lowerCAmelCase : str = value
__lowerCAmelCase : List[str] = mixed_qkv.size(0 ) // 3
__lowerCAmelCase : str = mixed_qkv[:qkv_dim]
__lowerCAmelCase : int = mixed_qkv[qkv_dim : qkv_dim * 2]
__lowerCAmelCase : Any = mixed_qkv[qkv_dim * 2 :]
__lowerCAmelCase : List[Any] = query_layer
__lowerCAmelCase : Union[str, Any] = key_layer
__lowerCAmelCase : Tuple = value_layer
else:
__lowerCAmelCase : Dict = value
return model_state_dict
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case=False ) -> int:
__lowerCAmelCase : Optional[Any] = init_clap(lowerCAmelCase_ ,enable_fusion=lowerCAmelCase_ )
clap_model.eval()
__lowerCAmelCase : Tuple = clap_model.state_dict()
__lowerCAmelCase : Optional[int] = rename_state_dict(lowerCAmelCase_ )
__lowerCAmelCase : List[str] = ClapConfig()
__lowerCAmelCase : Tuple = enable_fusion
__lowerCAmelCase : int = ClapModel(lowerCAmelCase_ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase_ ,strict=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
transformers_config.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
__snake_case : Any = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 269 |
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    Merge two minterm strings that differ in at most one position.

    >>> compare_string('0010', '0110')
    '0_10'
    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # a successful merge covers both terms; the merged term carries on
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # first pass: any minterm covered by exactly one implicant makes that implicant essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedy pass: repeatedly take the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the prime-implicant coverage chart."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 89 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        # greedy longest-match-first WordPiece segmentation
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """
    Construct a CPM-Ant tokenizer: jieba segmentation followed by WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation first, then WordPiece on each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 353 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        """
        Assert that the created tokens are the same as the hard-coded ones.
        """
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        """
        Tests the tokenizer on a streamed sample of the XNLI dataset.
        """
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 154 | 0 |
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
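
# Hedged CLI sketch (the script file name and checkpoint are illustrative):
#
#   python convert_vae_decoder_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14 --fp16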
| 42 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
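
# Hedged usage sketch: building a config for a segmentation-style BEiT with
# relative position bias enabled (all field names as defined above):
#
#   config = BeitConfig(use_relative_position_bias=True, out_indices=[3, 5, 7, 11])
#   assert config.model_type == "beit"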
| 45 | 0 |
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
#    SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
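
# Hedged CLI sketch: in the transformers repo this script is run from the
# repository root, either fixing files in place or (with --check_only) failing
# on unsorted mappings:
#
#   python utils/sort_auto_mappings.py
#   python utils/sort_auto_mappings.py --check_only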
| 364 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    '''simple docstring'''
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        """simple docstring"""
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        """simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
_a = self._tf_checkpoint
_a = ''''''
else:
_a = self._tf_checkpoint
_a = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCAmelCase_ , self._config , self._pytorch_dump_output , lowerCAmelCase_ )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5,"
                " transfo_xl, xlm, xlnet]"
            )
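# Hypothetical programmatic invocation (all paths are placeholders, not real
# files); the CLI normally builds this namespace from `transformers-cli convert ...`:
#
#     from argparse import Namespace
#     args = Namespace(
#         model_type="bert",
#         tf_checkpoint="./bert_model.ckpt",
#         pytorch_dump_output="./pytorch_model.bin",
#         config="./bert_config.json",
#         finetuning_task_name=None,
#     )
#     convert_command_factory(args).run()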
| 179 | 0 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    '''simple docstring'''
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 80 |
'''simple docstring'''
import math
def sieve(n: int) -> list:
    # Segmented sieve: find primes up to sqrt(n), then mark off their
    # multiples segment by segment.
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
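# Quick sanity check on a small bound:
#
#     sieve(30)  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]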
| 223 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    """simple docstring"""

    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
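# Minimal wiring sketch, assuming a LightningModule `model` and an output
# directory (both hypothetical):
#
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback("./outputs", metric="rouge2"),
#             get_early_stopping_callback(metric="rouge2", patience=3),
#         ],
#     )
#     trainer.fit(model)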
| 117 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    """simple docstring"""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    """simple docstring"""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
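# Round-trip sketch (in-memory dataset; the file path is a placeholder):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"text": ["a", "b"]})
#     ParquetDatasetWriter(ds, "data.parquet").write()
#     reloaded = ParquetDatasetReader("data.parquet", split="train").read()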
| 117 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
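# Usage sketch (downloads or reads the standard checkpoint from cache):
#
#     tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     tok("Hello world")["input_ids"]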
| 31 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    '''simple docstring'''

    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    '''simple docstring'''

    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    '''simple docstring'''

    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    '''simple docstring'''

    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''

    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''

    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''

    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''

    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''

    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    '''simple docstring'''

    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    '''simple docstring'''

    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    '''simple docstring'''

    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    '''simple docstring'''

    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
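# Usage sketch (archive path and cache dir are placeholders):
#
#     manager = ExtractManager(cache_dir="/tmp/extract_cache")
#     extracted_path = manager.extract("/tmp/archive.tar.gz")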
| 31 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCAmelCase__ = """\
"""
UpperCAmelCase__ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
UpperCAmelCase__ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 352 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
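# The lazy-module pattern above defers importing the heavy torch-backed
# submodules until an attribute is first touched, e.g.:
#
#     from transformers.models.trocr import TrOCRConfig        # light import
#     from transformers.models.trocr import TrOCRForCausalLM   # pulls in torch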
| 30 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# Placeholder objects that raise an informative error when `torch` is not installed.
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def _lowerCAmelCase(*args, **kwargs):
    requires_backends(_lowerCAmelCase, ["torch"])
def _lowerCAmelCase(*args, **kwargs):
    requires_backends(_lowerCAmelCase, ["torch"])
def _lowerCAmelCase(*args, **kwargs):
    requires_backends(_lowerCAmelCase, ["torch"])
def _lowerCAmelCase(*args, **kwargs):
    requires_backends(_lowerCAmelCase, ["torch"])
def _lowerCAmelCase(*args, **kwargs):
    requires_backends(_lowerCAmelCase, ["torch"])
def _lowerCAmelCase(*args, **kwargs):
    requires_backends(_lowerCAmelCase, ["torch"])
def _lowerCAmelCase(*args, **kwargs):
    requires_backends(_lowerCAmelCase, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class A_(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""torch"""]
def __init__( self :int , *lowercase_ :List[str] , **lowercase_ :Optional[Any] ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Any , *lowercase_ :Union[str, Any] , **lowercase_ :Tuple ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :str , *lowercase_ :Optional[int] , **lowercase_ :Optional[Any] ) -> Dict:
requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""torch"""]
def __init__( self :List[Any] , *lowercase_ :List[str] , **lowercase_ :List[str] ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :List[Any] , *lowercase_ :Dict , **lowercase_ :Optional[Any] ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :int , *lowercase_ :Any , **lowercase_ :str ) -> str:
requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""torch"""]
def __init__( self :Tuple , *lowercase_ :Union[str, Any] , **lowercase_ :Union[str, Any] ) -> List[str]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :List[str] , *lowercase_ :int , **lowercase_ :int ) -> Tuple:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Any , *lowercase_ :List[Any] , **lowercase_ :Union[str, Any] ) -> Any:
requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""torch"""]
def __init__( self :Dict , *lowercase_ :int , **lowercase_ :List[Any] ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :int , *lowercase_ :str , **lowercase_ :Any ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Union[str, Any] , *lowercase_ :List[Any] , **lowercase_ :List[Any] ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""torch"""]
def __init__( self :str , *lowercase_ :List[Any] , **lowercase_ :Dict ) -> Tuple:
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :List[str] , *lowercase_ :Union[str, Any] , **lowercase_ :Optional[Any] ) -> Dict:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Any , *lowercase_ :Tuple , **lowercase_ :Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""torch"""]
def __init__( self :Optional[Any] , *lowercase_ :Optional[int] , **lowercase_ :Optional[Any] ) -> Optional[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Optional[Any] , *lowercase_ :Union[str, Any] , **lowercase_ :str ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Dict , *lowercase_ :Dict , **lowercase_ :Optional[int] ) -> Any:
requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""torch"""]
def __init__( self :Tuple , *lowercase_ :List[Any] , **lowercase_ :Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Any , *lowercase_ :Optional[Any] , **lowercase_ :Optional[int] ) -> Tuple:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Tuple , *lowercase_ :Optional[int] , **lowercase_ :Any ) -> List[str]:
requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""torch"""]
def __init__( self :List[str] , *lowercase_ :str , **lowercase_ :Optional[Any] ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :int , *lowercase_ :Dict , **lowercase_ :Dict ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Tuple , *lowercase_ :Union[str, Any] , **lowercase_ :Optional[Any] ) -> Tuple:
requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""torch"""]
def __init__( self :str , *lowercase_ :Dict , **lowercase_ :Optional[int] ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Any , *lowercase_ :Dict , **lowercase_ :List[str] ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :List[Any] , *lowercase_ :Optional[Any] , **lowercase_ :Optional[int] ) -> str:
requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""torch"""]
def __init__( self :List[Any] , *lowercase_ :Dict , **lowercase_ :Union[str, Any] ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Dict , *lowercase_ :int , **lowercase_ :Optional[Any] ) -> Tuple:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase__ ( cls :Optional[Any] , *lowercase_ :str , **lowercase_ :Tuple ) -> List[str]:
requires_backends(cls , ['torch'] )
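# Hedged note (added for clarity; not part of the original module): each class
# above follows the transformers/diffusers "dummy object" pattern — in the real
# source the metaclass is DummyObject (as in the note_seq dummy module further
# below) and the two extra classmethods are from_config and from_pretrained —
# so that importing a torch-backed name without PyTorch installed fails only on
# first use, via requires_backends, rather than at import time.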
| 78 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 185 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 367 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes must equal the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
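    # Hedged usage sketch (added; tree shape and expected value follow the
    # classic "distribute coins in binary tree" problem): a root holding 3
    # coins with two empty children needs one move per child, i.e. 2 moves.
    example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
    print(distribute_coins(example_tree))  # expected: 2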
| 241 | 0 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune branches that already overshoot the target or can no longer reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
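# Expected output for this input (hedged; computed by hand): the subsets of
# [3, 34, 4, 12, 5, 2] that sum to 9 are [3, 4, 2] and [4, 5], so the script
# prints: [3, 4, 2] [4, 5]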
| 327 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
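# Hedged usage sketch (added; the asserted values are the defaults above):
# config = MraConfig()
# assert config.approx_mode == "full" and config.hidden_size == 768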
| 350 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_A = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up GPU memory after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 167 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def UpperCAmelCase ( self ) -> Optional[Any]:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase ( self ) -> Tuple:
pass
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : List[str] = model_class(A )
snake_case : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Optional[int] = [*signature.parameters.keys()]
snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def UpperCAmelCase ( self ) -> Any:
snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase ( self ) -> str:
def check_hidden_states_output(A , A , A ):
snake_case : List[str] = model_class(A )
snake_case : Optional[int] = model(**self._prepare_for_class(A , A ) , training=A )
snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case : Dict = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Optional[Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case : Union[str, Any] = layer_type
snake_case : Union[str, Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Optional[Any] = True
check_hidden_states_output(A , A , A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(A , A , A , A={} ):
snake_case : int = model(A , return_dict=A , **A )
snake_case : int = model(A , return_dict=A , **A ).to_tuple()
def recursive_check(A , A ):
if isinstance(A , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(A , A ):
recursive_check(A , A )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(A , A ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(A , A )
for model_class in self.all_model_classes:
snake_case : Optional[Any] = model_class(A )
snake_case : Optional[Any] = self._prepare_for_class(A , A )
snake_case : Any = self._prepare_for_class(A , A )
check_equivalence(A , A , A )
snake_case : List[str] = self._prepare_for_class(A , A , return_labels=A )
snake_case : Dict = self._prepare_for_class(A , A , return_labels=A )
check_equivalence(A , A , A )
snake_case : Dict = self._prepare_for_class(A , A )
snake_case : Any = self._prepare_for_class(A , A )
check_equivalence(A , A , A , {"""output_hidden_states""": True} )
snake_case : Any = self._prepare_for_class(A , A , return_labels=A )
snake_case : Dict = self._prepare_for_class(A , A , return_labels=A )
check_equivalence(A , A , A , {"""output_hidden_states""": True} )
def UpperCAmelCase ( self ) -> Any:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Optional[Any] = TFRegNetModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Union[str, Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self ) -> Dict:
snake_case : Dict = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case : Union[str, Any] = self.default_image_processor
snake_case : Optional[int] = prepare_img()
snake_case : List[str] = image_processor(images=A , return_tensors="""tf""" )
# forward pass
snake_case : Tuple = model(**A , training=A )
# verify the logits
snake_case : Union[str, Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A )
snake_case : Union[str, Any] = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , A , atol=1e-4 )
| 124 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
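# Hedged illustration (added; the values follow from the definitions above):
# for the row [4, 3, 2, -1], find_negative_index returns 3 (the index of the
# first negative entry); for [7, 7, 6] it returns len(array) == 3; and
# count_negatives_binary_search on test_grids[0] yields 8 negative entries.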
| 124 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
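# Hedged usage sketch (added; the asserted sizes are the defaults above, which
# match the Helsinki-NLP/opus-mt-* architecture):
# config = MarianConfig()
# assert config.d_model == 1024 and config.encoder_layers == 12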
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
])
if self.use_past:
UpperCAmelCase_ = {0: '''batch'''}
UpperCAmelCase_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}
UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction='''inputs''')
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
])
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
for i in range(_snake_case):
UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
UpperCAmelCase_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
])
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = super().outputs
else:
UpperCAmelCase_ = super(_snake_case , self).outputs
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
for i in range(_snake_case):
UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
UpperCAmelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def lowerCamelCase ( self : Tuple , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ):
"""simple docstring"""
UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case)
# Generate decoder inputs
UpperCAmelCase_ = seq_length if not self.use_past else 1
UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case)
UpperCAmelCase_ = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase_ = dict(**_snake_case , **_snake_case)
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs['''input_ids'''].shape
UpperCAmelCase_ = common_inputs['''decoder_input_ids'''].shape[1]
UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads
UpperCAmelCase_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ = decoder_seq_length + 3
UpperCAmelCase_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase_ = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_snake_case , _snake_case)] , dim=1)
UpperCAmelCase_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
UpperCAmelCase_ = min(_snake_case , _snake_case)
UpperCAmelCase_ = max(_snake_case , _snake_case) - min_num_layers
UpperCAmelCase_ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_snake_case):
common_inputs["past_key_values"].append(
(
torch.zeros(_snake_case),
torch.zeros(_snake_case),
torch.zeros(_snake_case),
torch.zeros(_snake_case),
))
# TODO: test this.
UpperCAmelCase_ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_snake_case , _snake_case):
common_inputs["past_key_values"].append((torch.zeros(_snake_case), torch.zeros(_snake_case)))
return common_inputs
def lowerCamelCase ( self : Any , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ):
"""simple docstring"""
UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case)
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase_ = seqlen + 2
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads
UpperCAmelCase_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ = common_inputs['''attention_mask'''].dtype
UpperCAmelCase_ = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_snake_case , _snake_case , dtype=_snake_case)] , dim=1)
UpperCAmelCase_ = [
(torch.zeros(_snake_case), torch.zeros(_snake_case)) for _ in range(_snake_case)
]
return common_inputs
def lowerCamelCase ( self : Dict , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ):
"""simple docstring"""
UpperCAmelCase_ = compute_effective_axis_dimension(
_snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ = tokenizer.num_special_tokens_to_add(_snake_case)
UpperCAmelCase_ = compute_effective_axis_dimension(
_snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_snake_case)
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size
UpperCAmelCase_ = dict(tokenizer(_snake_case , return_tensors=_snake_case))
return common_inputs
def lowerCamelCase ( self : List[Any] , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case)
else:
UpperCAmelCase_ = self._generate_dummy_inputs_for_causal_lm(
_snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case)
return common_inputs
def lowerCamelCase ( self : Any , _snake_case : Tuple , _snake_case : Any , _snake_case : Optional[int] , _snake_case : List[Any]):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = super()._flatten_past_key_values_(_snake_case , _snake_case , _snake_case , _snake_case)
else:
UpperCAmelCase_ = super(_snake_case , self)._flatten_past_key_values_(
_snake_case , _snake_case , _snake_case , _snake_case)
@property
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
return 1e-4
| 366 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
| 7 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
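# Hedged note (added): with the lazy module installed in sys.modules, a
# statement like `from ...altclip import AltCLIPProcessor` resolves the symbol
# on first attribute access instead of importing the modeling code eagerly.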
| 46 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowercase = "\\n Text data.\n Second line of data."
lowercase = "file"
@pytest.fixture(scope='session')
def __UpperCAmelCase ( a_):
snake_case_ = tmp_path_factory.mktemp('data') / (FILE_PATH + '.zstd')
snake_case_ = bytes(a_ , 'utf-8')
with zstd.open(a_ , 'wb') as f:
f.write(a_)
return path
@pytest.fixture
def __UpperCAmelCase ( a_):
with open(os.path.join(tmpfs.local_root_dir , a_) , 'w') as f:
f.write(a_)
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
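# All three offline tests above follow the same pattern: with HF_DATASETS_OFFLINE
# patched to True, each network-touching helper (http/ftp/fsspec) must fail fast
# with OfflineModeIsEnabled instead of attempting a real request.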
| 178 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively, assuming A is strictly diagonally dominant."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        raise ValueError(f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}")

    if cols2 != 1:
        raise ValueError(f"Constant matrix must be nx1 but received {rows2}x{cols2}")

    if rows1 != rows2:
        raise ValueError(
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )

    if len(init_val) != rows1:
        raise ValueError(
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check that each diagonal entry strictly dominates the sum of the rest of its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
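    # Illustrative usage (hypothetical values, not part of the original module):
    #   coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    #   constant = np.array([[1.0], [2.0]])
    #   jacobi_iteration_method(coefficient, constant, [0, 0], iterations=25)
    # converges towards the exact solution [1/11, 7/11] ~ [0.0909, 0.6364].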
| 264 |
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field; used as a fallback when no key is passed to the methods
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
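# Because XOR is an involution, applying the same key twice is the identity, e.g.
# crypt.decrypt_string(crypt.encrypt_string("hallo welt", 67), 67) == "hallo welt"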
| 264 | 1 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclidean algorithm: gcd(a, b), with gcd(0, b) == b."""
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            raise ValueError(
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
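    # Example recalled from the upstream doctests (worth re-verifying): with key
    # [[2, 5], [1, 6]], HillCipher encrypts "testing hill cipher" to
    # "WHXYJOLM9C6XT085LL" and decrypt() recovers the padded plaintext.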
| 302 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()

        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()

        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
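    # The round-trip invariant exercised above: to_bettertransformer() followed by
    # reverse_bettertransformer() must yield a model whose generations match the
    # BetterTransformer ones token for token (hence the torch.allclose check).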
| 302 | 1 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
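    # Example (classic instance): frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    # fills the first two items whole and 20/30 of the third, returning 240.0.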
| 363 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime

    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
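    # Sanity check from the Project Euler 191 statement: over a 4 day period
    # there are exactly 43 prize strings, i.e. solution(4) == 43.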
| 191 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 327 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
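    # arc_length(90, 10) is a quarter of the full circumference: 2*pi*10/4 = 5*pi ~ 15.71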
| 327 | 1 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
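    # Illustrative invocation (script name and paths are hypothetical):
    #   python convert_openai_checkpoint.py \
    #       --openai_checkpoint_folder_path ./openai-gpt-ckpt \
    #       --pytorch_dump_folder_path ./openai-gpt-pytorch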
| 159 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 159 | 1 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
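# Subtracting the row-wise max inside softmax is the standard numerical-stability
# trick: it leaves the normalized result unchanged while keeping np.exp from
# overflowing for large logits.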
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 17 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial term by term: the sum of c_i * x**i."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
_a = (0.0, 0.0, 5.0, 9.3, 7.0)
_a = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
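# Both evaluations agree here: 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 79800.0,
# but horner() needs only n multiply-adds instead of computing each power of x.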
| 17 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 259 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(self, save_directory, speaker_embeddings_dict_path="speaker_embeddings_path.json", speaker_embeddings_directory="speaker_embeddings", push_to_hub=False, **kwargs):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)

            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
| 259 | 1 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node):
        # walk parent links back to the start node
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()

    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
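    # Note: greedy best-first expands purely on the heuristic (f = h), so unlike
    # A* (f = g + h) the path marked with 2's above is not guaranteed shortest.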
| 211 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array( array : np.ndarray ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
dtype = array.dtype
dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
dtype_kind = dtype.kind
dtype_itemsize = dtype.itemsize
dest_dtype = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
dest_dtype = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
if dtype is not dest_dtype:
warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
dest_dtype = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
dest_dtype = np.dtype(dest_dtype_str )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" )
image = PIL.Image.fromarray(array.astype(dest_dtype ) )
return {"path": None, "bytes": image_to_bytes(image )}
def objs_to_list_of_image_dicts( objs : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
_, obj = first_non_null_value(objs )
if isinstance(obj , str ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(obj , np.ndarray ):
obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
return [obj_to_image_dict_func(obj ) for obj in objs]
elif isinstance(obj , PIL.Image.Image ):
obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
return [obj_to_image_dict_func(obj ) for obj in objs]
else:
return objs
else:
return objs
| 184 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
torch_layer.weight = nn.Parameter(weight )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ):
# set torch weights for 1-to-1 comparison
np_query_key = np.asarray(weights[0] )
np_value = np.asarray(weights[1] )
np_dense = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ):
# set torch weights for 1-to-1 comparison
np_query = np.asarray(weights[0] )
np_key = np.asarray(weights[1] )
np_value = np.asarray(weights[2] )
np_dense = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
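# Editor's note (added): LSH self-attention in Reformer shares a single
# query_key projection, so its weight list holds 3 tensors (query_key, value,
# output dense), while local self-attention keeps query and key separate and
# holds 4 -- set_block_weights_in_torch below dispatches on len(attn_weights) < 4.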
def set_block_weights_in_torch( weights , torch_block , hidden_size ):
# layernorm 1
layer_norm_a = weights[0][0][0]
layer_norm_a_weight = np.asarray(layer_norm_a[0] )
layer_norm_a_bias = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
# lsh weights + output
attn_weights = weights[0][1]
if len(attn_weights ) < 4:
set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
else:
set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
# intermediate weights
intermediate_weights = weights[2][0][1][2]
# Chunked Feed Forward
if len(intermediate_weights ) == 4:
intermediate_weights = intermediate_weights[2]
# layernorm 2
layer_norm_b_weight = np.asarray(intermediate_weights[0][0] )
layer_norm_b_bias = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_b_weight ) , torch.tensor(layer_norm_b_bias ) , )
# intermediate dense
inter_dense_weight = np.asarray(intermediate_weights[1][0] )
inter_dense_bias = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
# intermediate out
out_dense_weight = np.asarray(intermediate_weights[4][0] )
out_dense_bias = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size ):
# reformer model
torch_model_reformer = torch_model.reformer
# word embeds
word_embeddings = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
if isinstance(weights[3] , tuple ):
position_embeddings = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
emb_weights = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'''{position_embeddings[emb_idx]} emb does not match'''
position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
trax_layer_weights = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
trax_layer_weights ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(block_weights , layer , hidden_size )
# output layer norm
layer_norm_out_weight = np.asarray(weights[7][0] )
layer_norm_out_bias = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
# output embeddings
output_embed_weights = np.asarray(weights[9][0] )
output_embed_bias = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ):
# Initialise PyTorch model
config = ReformerConfig.from_json_file(config_file )
print(F'''Building PyTorch model from configuration: {config}''' )
model = ReformerModelWithLMHead(config )
with open(trax_model_pkl_path , "rb" ) as f:
model_weights = pickle.load(f )['''weights''']
set_model_weights_in_torch(model_weights , model , config.hidden_size )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the trax model pickle (.pkl) checkpoint file.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
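# Hypothetical invocation sketch (editor's addition; the script filename and
# paths below are placeholders, not from the original sample):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin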
| 354 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : List[Any] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class UpperCAmelCase_ ( PretrainedConfig ):
'''simple docstring'''
A : Tuple = 'conditional_detr'
A : Optional[int] = ['past_key_values']
A : List[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=300 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="sine" , _SCREAMING_SNAKE_CASE="resnet50" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.25 , **_SCREAMING_SNAKE_CASE , ) -> str:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
snake_case_ : List[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ : Optional[int] = backbone_config.get("model_type" )
snake_case_ : str = CONFIG_MAPPING[backbone_model_type]
snake_case_ : Tuple = config_class.from_dict(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = use_timm_backbone
snake_case_ : Optional[Any] = backbone_config
snake_case_ : str = num_channels
snake_case_ : Optional[Any] = num_queries
snake_case_ : Optional[Any] = d_model
snake_case_ : Optional[Any] = encoder_ffn_dim
snake_case_ : str = encoder_layers
snake_case_ : int = encoder_attention_heads
snake_case_ : int = decoder_ffn_dim
snake_case_ : Optional[Any] = decoder_layers
snake_case_ : List[str] = decoder_attention_heads
snake_case_ : List[str] = dropout
snake_case_ : Optional[int] = attention_dropout
snake_case_ : Tuple = activation_dropout
snake_case_ : List[Any] = activation_function
snake_case_ : Dict = init_std
snake_case_ : str = init_xavier_std
snake_case_ : Tuple = encoder_layerdrop
snake_case_ : int = decoder_layerdrop
snake_case_ : List[Any] = encoder_layers
snake_case_ : int = auxiliary_loss
snake_case_ : int = position_embedding_type
snake_case_ : List[str] = backbone
snake_case_ : Union[str, Any] = use_pretrained_backbone
snake_case_ : Optional[Any] = dilation
# Hungarian matcher
snake_case_ : Tuple = class_cost
snake_case_ : Tuple = bbox_cost
snake_case_ : str = giou_cost
# Loss coefficients
snake_case_ : Union[str, Any] = mask_loss_coefficient
snake_case_ : Tuple = dice_loss_coefficient
snake_case_ : List[str] = cls_loss_coefficient
snake_case_ : List[str] = bbox_loss_coefficient
snake_case_ : List[str] = giou_loss_coefficient
snake_case_ : Any = focal_alpha
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def _lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def _lowerCAmelCase ( self ) -> int:
return self.d_model
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : List[Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
snake_case_ : Optional[int] = self.backbone_config.to_dict()
snake_case_ : Optional[int] = self.__class__.model_type
return output
class UpperCAmelCase_ ( OnnxConfig ):
'''simple docstring'''
A : Union[str, Any] = version.parse('1.11' )
@property
def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCAmelCase ( self ) -> float:
return 1e-5
@property
def _lowerCAmelCase ( self ) -> int:
return 12
| 36 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __SCREAMING_SNAKE_CASE (TaskTemplate ):
"""simple docstring"""
task: str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
label_schema: ClassVar[Features] = Features({'summary': Value('string' )} )
text_column: str = 'text'
summary_column: str = 'summary'
@property
def UpperCamelCase__ ( self : Optional[int] ):
return {self.text_column: "text", self.summary_column: "summary"}
| 63 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
torch_builtin = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
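# Editor's note (added): gelu_python is the exact erf-based GELU while
# gelu_new is the tanh approximation, so the two agree only approximately --
# which is exactly what the assertTrue/assertFalse pair above verifies.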
def a_ ( self ):
x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
torch_builtin = get_activation('''gelu''' )
geluaa = get_activation('''gelu_10''' )
y_gelu = torch_builtin(x )
y_gelu_aa = geluaa(x )
clipped_mask = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(y_gelu_aa ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def a_ ( self ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(KeyError ):
get_activation('''bogus''' )
with self.assertRaises(KeyError ):
get_activation(None )
def a_ ( self ):
acta = get_activation('''gelu''' )
acta.a = 1
actb = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(AttributeError ):
_ = actb.a
| 127 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int]=13 , lowerCAmelCase__ : int=7 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Tuple=99 , lowerCAmelCase__ : Optional[int]=32 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Optional[int]=37 , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : Dict=512 , lowerCAmelCase__ : int=16 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : int=4 , lowerCAmelCase__ : Optional[Any]=None , ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = 13
_UpperCamelCase = 7
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = 99
_UpperCamelCase = 32
_UpperCamelCase = 2
_UpperCamelCase = 4
_UpperCamelCase = 37
_UpperCamelCase = '''gelu'''
_UpperCamelCase = 0.1
_UpperCamelCase = 0.1
_UpperCamelCase = 512
_UpperCamelCase = 16
_UpperCamelCase = 2
_UpperCamelCase = 0.02
_UpperCamelCase = 3
_UpperCamelCase = 4
_UpperCamelCase = None
def snake_case__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCAmelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = TFRoFormerModel(config=lowerCAmelCase__ )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = True
_UpperCamelCase = TFRoFormerForCausalLM(config=lowerCAmelCase__ )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(lowerCAmelCase__ )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def snake_case__ ( self : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = TFRoFormerForMaskedLM(config=lowerCAmelCase__ )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict ) -> int:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFRoFormerForSequenceClassification(config=lowerCAmelCase__ )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.num_choices
_UpperCamelCase = TFRoFormerForMultipleChoice(config=lowerCAmelCase__ )
_UpperCamelCase = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_UpperCamelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFRoFormerForTokenClassification(config=lowerCAmelCase__ )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = TFRoFormerForQuestionAnswering(config=lowerCAmelCase__ )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Any = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : Optional[Any] = False
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> bool:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def snake_case__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = TFRoFormerModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def snake_case__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : str ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def snake_case__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def snake_case__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@slow
def snake_case__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
_UpperCamelCase = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(lowerCAmelCase__ )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : List[str] ) -> int:
'''simple docstring'''
_UpperCamelCase = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
_UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCamelCase = model(lowerCAmelCase__ )[0]
# TODO Replace vocab size
_UpperCamelCase = 50000
_UpperCamelCase = [1, 6, vocab_size]
self.assertEqual(output.shape , lowerCAmelCase__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_UpperCamelCase = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_snake_case : Union[str, Any] = 1e-4
def snake_case__ ( self : str ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = tf.constant([[4, 10]] )
_UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_UpperCamelCase = emba(input_ids.shape )
_UpperCamelCase = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , atol=self.tolerance )
def snake_case__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
_UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
_UpperCamelCase = emba.weight[:3, :5]
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , atol=self.tolerance )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_snake_case : Any = 1e-4
def snake_case__ ( self : List[str] ) -> str:
'''simple docstring'''
_UpperCamelCase = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
_UpperCamelCase = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
_UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_UpperCamelCase = embed_positions([2, 16, 768] )[None, None, :, :]
_UpperCamelCase , _UpperCamelCase = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
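# Editor's note (added): rotary position embeddings (RoFormer) rotate each
# pair of feature dimensions by a position-dependent angle, schematically
#   q' = q * cos(theta_pos) + rotate_half(q) * sin(theta_pos)
# (and the same for k), so attention scores depend only on relative offsets.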
_UpperCamelCase = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
_UpperCamelCase = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , lowerCAmelCase__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , lowerCAmelCase__ , atol=self.tolerance )
| 287 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
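# Minimal sketch of the pattern demonstrated below (editor's addition,
# relying on the public `find_executable_batch_size` decorator from
# accelerate.utils):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def inner_training_loop(batch_size):
#       ...  # build dataloaders and train with `batch_size`
#   inner_training_loop()
#
# On a CUDA out-of-memory error the decorator halves `batch_size` and calls
# the function again, until training fits in memory (or batch_size reaches 0,
# at which point an error is raised).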
lowercase__ : str = 16
lowercase__ : int = 32
def get_dataloaders( accelerator : Accelerator, batch_size : int = 16 ):
"""simple docstring"""
tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
datasets = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
eval_dataloader = DataLoader(
tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function( config , args ):
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None ) == "1":
config['''num_epochs'''] = 2
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config['''lr''']
num_epochs = int(config['''num_epochs'''] )
seed = int(config['''seed'''] )
batch_size = int(config['''batch_size'''] )
metric = evaluate.load('''glue''', '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=batch_size )
def inner_training_loop(batch_size ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(seed )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device )
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr )
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size )
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs), )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
# Now we train the model
for epoch in range(num_epochs ):
model.train()
for step, batch in enumerate(train_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
outputs = model(**batch )
loss = outputs.loss
accelerator.backward(loss )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
outputs = model(**batch )
predictions = outputs.logits.argmax(dim=-1 )
predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=predictions, references=references, )
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""", eval_metric )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main( ):
"""simple docstring"""
parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''', )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
args = parser.parse_args()
config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(config, args )
if __name__ == "__main__":
main()
| 287 | 1 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 259 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] =ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = (3, 32, 128)
UpperCamelCase :Any = tempfile.mkdtemp()
# fmt: off
UpperCamelCase :int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
UpperCamelCase :Optional[int] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
UpperCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
UpperCamelCase :Tuple = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
UpperCamelCase :str = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> int:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Dict = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
UpperCamelCase :List[Any] = Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) )
return image_input
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = self.get_image_processor()
UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Dict = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Optional[int] = self.get_tokenizer()
UpperCamelCase :Dict = self.get_image_processor()
UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase :Optional[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
UpperCamelCase :int = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Tuple = self.get_image_processor()
UpperCamelCase :List[str] = self.get_tokenizer()
UpperCamelCase :str = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = self.prepare_image_inputs()
UpperCamelCase :List[str] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
UpperCamelCase :Optional[Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[Any] = self.get_image_processor()
UpperCamelCase :Union[str, Any] = self.get_tokenizer()
UpperCamelCase :int = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = '''test'''
UpperCamelCase :Optional[int] = processor(text=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = self.get_image_processor()
UpperCamelCase :Tuple = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = '''test'''
UpperCamelCase :str = self.prepare_image_inputs()
UpperCamelCase :Dict = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[Any] = self.get_image_processor()
UpperCamelCase :Any = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase :Union[str, Any] = processor.char_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :List[Any] = self.get_image_processor()
UpperCamelCase :Optional[Any] = self.get_tokenizer()
UpperCamelCase :Any = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = None
UpperCamelCase :List[Any] = self.prepare_image_inputs()
UpperCamelCase :Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :str = self.get_image_processor()
UpperCamelCase :Tuple = self.get_tokenizer()
UpperCamelCase :Optional[int] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = torch.randn(1 , 27 , 38 )
UpperCamelCase :Union[str, Any] = torch.randn(1 , 27 , 5_0257 )
UpperCamelCase :Optional[Any] = torch.randn(1 , 27 , 3_0522 )
UpperCamelCase :Optional[Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 259 | 1 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class a__ ( IterableDataset ):
'''simple docstring'''
def __init__( self , p_stop=0.01 , max_length=10_00 ):
self.p_stop = p_stop
self.max_length = max_length
def __iter__( self ):
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
stop = random.random() < self.p_stop
class a__ ( unittest.TestCase ):
'''simple docstring'''
def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
batch_sampler_shards = [
BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
for i in range(2 )
]
batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
self.assertListEqual(batch_sampler_lists , expected )
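# Editor's note (added): each BatchSamplerShard yields the batches destined
# for one of the 2 processes; with even_batches=True (the default) the shards
# wrap around to the start of the dataset so both processes see the same
# number of batches, which is why some expected shards below contain cycled
# indices such as [21, 0, 1].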
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# Check the shards when the dataset is a round multiple of total batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# Check the shards when the dataset is a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# Check the shards when the dataset is a round multiple of total batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple
# of num_processes batches.
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
lowerCAmelCase__ = [BatchSamplerShard(lowerCamelCase_ , 2 , lowerCamelCase_ , even_batches=lowerCamelCase_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=2 , lowerCamelCase_=False ) -> Union[str, Any]:
random.seed(lowerCamelCase_ )
lowerCAmelCase__ = list(lowerCamelCase_ )
lowerCAmelCase__ = [
IterableDatasetShard(
lowerCamelCase_ , batch_size=lowerCamelCase_ , drop_last=lowerCamelCase_ , num_processes=lowerCamelCase_ , process_index=lowerCamelCase_ , split_batches=lowerCamelCase_ , )
for i in range(lowerCamelCase_ )
]
lowerCAmelCase__ = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCamelCase_ )
iterable_dataset_lists.append(list(lowerCamelCase_ ) )
lowerCAmelCase__ = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
lowerCAmelCase__ = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
self.assertTrue(len(lowerCamelCase_ ) % shard_batch_size == 0 )
lowerCAmelCase__ = []
for idx in range(0 , len(lowerCamelCase_ ) , lowerCamelCase_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCamelCase_ ) < len(lowerCamelCase_ ):
reference += reference
self.assertListEqual(lowerCamelCase_ , reference[: len(lowerCamelCase_ )] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = 42
lowerCAmelCase__ = RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Edge case with a very small dataset
lowerCAmelCase__ = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = SkipBatchSampler(lowerCamelCase_ , 2 )
self.assertListEqual(list(lowerCamelCase_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = DataLoader(list(range(16 ) ) , batch_size=4 )
lowerCAmelCase__ = skip_first_batches(lowerCamelCase_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
Accelerator()
lowerCAmelCase__ = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 228 |
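# The sharding exercised above hands whole batches out round-robin across
# processes and, when batches must stay even, pads short shards by cycling
# back to the first batches. A minimal, self-contained sketch of that idea
# (an illustration only, not Accelerate's actual BatchSamplerShard):
def shard_batches(batches, num_processes, process_index, even_batches=True):
    shard = list(batches[process_index::num_processes])
    if even_batches and batches:
        target_len = -(-len(batches) // num_processes)  # ceil(len / num_processes)
        while len(shard) < target_len:
            wrap_index = (len(shard) * num_processes + process_index) % len(batches)
            shard.append(batches[wrap_index])           # cycle back to the start
    return shard

# For a single leftover batch, both shards receive it, as in the tiny-dataset
# expectation above: shard_batches([[0, 1]], 2, 0) == shard_batches([[0, 1]], 2, 1) == [[0, 1]]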
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__UpperCAmelCase = {
'''junnyu/roformer_chinese_small''': 1_536,
'''junnyu/roformer_chinese_base''': 1_536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
__UpperCAmelCase = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : int = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowercase__ : Tuple = RoFormerTokenizer
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True , lowerCamelCase_="[UNK]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="[PAD]" , lowerCamelCase_="[CLS]" , lowerCamelCase_="[MASK]" , lowerCamelCase_=True , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Tuple:
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , tokenize_chinese_chars=lowerCamelCase_ , strip_accents=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , lowerCamelCase_ ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , lowerCamelCase_ ) != strip_accents
):
lowerCAmelCase__ = getattr(lowerCamelCase_ , pre_tok_state.pop('''type''' ) )
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = strip_accents
lowerCAmelCase__ = pre_tok_class(**lowerCamelCase_ )
lowerCAmelCase__ = do_lower_case
def __getstate__( self ) -> Any:
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = BertPreTokenizer()
return state
def __setstate__( self , lowerCamelCase_ ) -> List[Any]:
lowerCAmelCase__ = d
lowerCAmelCase__ = self.__dict__['''_tokenizer'''].get_vocab()
lowerCAmelCase__ = PreTokenizer.custom(JiebaPreTokenizer(lowerCamelCase_ ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=None ) -> Union[str, Any]:
lowerCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Tuple[str]:
lowerCAmelCase__ = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=False , **lowerCamelCase_ , ) -> Union[str, Any]:
lowerCAmelCase__ = BertPreTokenizer()
return super().save_pretrained(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) | 228 | 1 |
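# The two methods above build BERT-style pair inputs, `[CLS] A [SEP] B [SEP]`,
# and the matching 0/1 token type ids. The same layout as a standalone sketch
# (the token ids are toy values, not the real vocabulary):
def build_pair(ids_a, ids_b=None, cls_id=101, sep_id=102):
    input_ids = [cls_id] + ids_a + [sep_id]
    token_type_ids = [0] * len(input_ids)
    if ids_b is not None:
        input_ids += ids_b + [sep_id]
        token_type_ids += [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids

# build_pair([7, 8], [9]) -> ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])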
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : int = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Union[str, Any] ='git_vision_model'
def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=16, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =hidden_size
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =num_channels
lowerCamelCase_ =patch_size
lowerCamelCase_ =image_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =hidden_act
@classmethod
def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''' ) == "git":
lowerCamelCase_ =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase, **lowerCAmelCase )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Dict ='git'
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=30_522, lowerCAmelCase=768, lowerCAmelCase=6, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=1_024, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-12, lowerCAmelCase=0, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=101, lowerCAmelCase=102, lowerCAmelCase=None, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, pad_token_id=lowerCAmelCase, **lowerCAmelCase )
if vision_config is None:
lowerCamelCase_ ={}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
lowerCamelCase_ =GitVisionConfig(**lowerCAmelCase )
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_act
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =initializer_range
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =position_embedding_type
lowerCamelCase_ =use_cache
lowerCamelCase_ =tie_word_embeddings
lowerCamelCase_ =num_image_with_embedding
lowerCamelCase_ =bos_token_id
lowerCamelCase_ =eos_token_id
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =copy.deepcopy(self.__dict__ )
lowerCamelCase_ =self.vision_config.to_dict()
lowerCamelCase_ =self.__class__.model_type
return output
| 75 |
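# A composite config like the one above serializes itself by deep-copying
# __dict__ and replacing the nested sub-config object with its own dict. The
# pattern in isolation (class and attribute names here are illustrative):
import copy

class VisionSubConfig:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class CompositeConfig:
    model_type = "composite"

    def __init__(self, vision_config=None):
        self.vision_config = vision_config or VisionSubConfig()

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()  # flatten the sub-config
        output["model_type"] = self.__class__.model_type
        return output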
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
a_ : List[Any] = logging.get_logger(__name__)
a_ : Tuple = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
a_ : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def a_ ( __snake_case : str ) -> Any:
"""simple docstring"""
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCamelCase_ =model_type_to_module_name(__snake_case )
lowerCamelCase_ =importlib.import_module(F'''.{module_name}''' , '''transformers.models''' )
try:
return getattr(__snake_case , __snake_case )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(__snake_case , '''__name__''' , __snake_case ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCamelCase_ =importlib.import_module('''transformers''' )
if hasattr(__snake_case , __snake_case ):
return getattr(__snake_case , __snake_case )
return None
def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =get_file_from_repo(
__snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(__snake_case , encoding='''utf-8''' ) as reader:
return json.load(__snake_case )
class __UpperCamelCase :
def __init__( self ):
"""simple docstring"""
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(lowerCAmelCase )
def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =kwargs.pop('''config''', lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''trust_remote_code''', lowerCAmelCase )
lowerCamelCase_ =True
lowerCamelCase_, lowerCamelCase_ =FeatureExtractionMixin.get_feature_extractor_dict(lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =config_dict.get('''feature_extractor_type''', lowerCAmelCase )
lowerCamelCase_ =None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''', {} ):
lowerCamelCase_ =config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase, **lowerCAmelCase )
# It could be in `config.feature_extractor_type`
lowerCamelCase_ =getattr(lowerCAmelCase, '''feature_extractor_type''', lowerCAmelCase )
if hasattr(lowerCAmelCase, '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCamelCase_ =config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
lowerCamelCase_ =feature_extractor_class_from_name(lowerCAmelCase )
lowerCamelCase_ =feature_extractor_auto_map is not None
lowerCamelCase_ =feature_extractor_class is not None or type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING
lowerCamelCase_ =resolve_trust_remote_code(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
if has_remote_code and trust_remote_code:
lowerCamelCase_ =get_class_from_dynamic_module(
lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''code_revision''', lowerCAmelCase )
if os.path.isdir(lowerCAmelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING:
lowerCamelCase_ =FEATURE_EXTRACTOR_MAPPING[type(lowerCAmelCase )]
return feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase )
raise ValueError(
f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowercase__ ( lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(lowerCAmelCase, lowerCAmelCase )
| 75 | 1 |
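# The lookup above is a registry pattern: map a model type to a module path and
# a class name, import lazily, and fetch the class with getattr. Stripped to
# its core (the example entry mirrors the "vit" row of the mapping above):
import importlib

FEATURE_EXTRACTOR_REGISTRY = {
    "vit": ("transformers.models.vit", "ViTFeatureExtractor"),
}

def resolve_feature_extractor(model_type: str):
    module_name, class_name = FEATURE_EXTRACTOR_REGISTRY[model_type]
    module = importlib.import_module(module_name)  # deferred until first use
    return getattr(module, class_name)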
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowerCamelCase ( __magic_name__ : int ):
a__: int =filter(lambda __magic_name__ : p.requires_grad , model.parameters() )
a__: Tuple =sum([np.prod(p.size() ) for p in model_parameters] )
return params
__UpperCAmelCase = logging.getLogger(__name__)
def __lowerCamelCase ( __magic_name__ : Any , __magic_name__ : str ):
if metric == "rouge2":
a__: str ="{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
a__: List[str] ="{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
a__: Union[str, Any] ="{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
" function." )
a__: Any =ModelCheckpoint(
dirpath=lowerCAmelCase__ , filename=lowerCAmelCase__ , monitor=F"val_{metric}" , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def __lowerCamelCase ( __magic_name__ : List[Any] , __magic_name__ : Optional[Any] ):
return EarlyStopping(
monitor=F"val_{metric}" , mode="min" if "loss" in metric else "max" , patience=lowerCAmelCase__ , verbose=lowerCAmelCase__ , )
class lowerCamelCase__ ( pl.Callback ):
def _lowerCamelCase ( self : int , _a : Tuple , _a : Optional[Any] ):
a__: str ={F"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(A__ )
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , _a : Optional[int] , _a : str , _a : List[Any] , _a : Optional[Any]=True ):
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****" )
a__: List[Any] =trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
a__: Any =Path(pl_module.hparams.output_dir )
if type_path == "test":
a__: Optional[Any] =od / "test_results.txt"
a__: Optional[int] =od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
a__: Dict =od / F"{type_path}_results/{trainer.global_step:05d}.txt"
a__: Optional[int] =od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A__ )
generations_file.parent.mkdir(exist_ok=A__ )
with open(A__ , "a+" ) as writer:
for key in sorted(A__ ):
if key in ["log", "progress_bar", "preds"]:
continue
a__: Any =metrics[key]
if isinstance(A__ , torch.Tensor ):
a__: int =val.item()
a__: Union[str, Any] =F"{key}: {val:.6f}\n"
writer.write(A__ )
if not save_generations:
return
if "preds" in metrics:
a__: Any ="\n".join(metrics["preds"] )
generations_file.open("w+" ).write(A__ )
@rank_zero_only
def _lowerCamelCase ( self : Any , _a : Tuple , _a : str ):
try:
a__: List[str] =pl_module.model.model.num_parameters()
except AttributeError:
a__: List[Any] =pl_module.model.num_parameters()
a__: Optional[int] =count_trainable_parameters(A__ )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def _lowerCamelCase ( self : Any , _a : Any , _a : List[Any] ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(A__ , A__ , "test" )
@rank_zero_only
def _lowerCamelCase ( self : List[Any] , _a : Union[str, Any] , _a : List[Any] ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 358 |
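# The callback module above logs a trainable-parameter count in millions. The
# count itself, restated standalone (assumes a PyTorch module):
import numpy as np

def count_trainable_parameters(model) -> int:
    trainable = (p for p in model.parameters() if p.requires_grad)
    return int(sum(np.prod(p.size()) for p in trainable))

# n_params / 1e6 then gives the "mp" (million parameters) figure logged above.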
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory-expensive.
__UpperCAmelCase = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__UpperCAmelCase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__UpperCAmelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : str ):
a__: int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] )
return (item, float(__magic_name__ ))
def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : str ):
a__: Any =random.randint(0 , len(__magic_name__ ) - 1 )
a__: Tuple =parent_a[:random_slice] + parent_a[random_slice:]
a__: List[str] =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : list[str] ):
a__: str =list(__magic_name__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
a__: Union[str, Any] =random.choice(__magic_name__ )
return "".join(__magic_name__ )
def __lowerCamelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ):
a__: List[Any] =[]
# Generate more children proportionally to the fitness score.
a__: Dict =int(parent_a[1] * 100 ) + 1
a__: Tuple =10 if child_n >= 10 else child_n
for _ in range(__magic_name__ ):
a__: List[str] =population_score[random.randint(0 , __magic_name__ )][0]
a__ , a__: Dict =crossover(parent_a[0] , __magic_name__ )
# Append new string to the population list.
pop.append(mutate(__magic_name__ , __magic_name__ ) )
pop.append(mutate(__magic_name__ , __magic_name__ ) )
return pop
def __lowerCamelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
a__: Any =F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(__magic_name__ )
# Verify that the target contains no genes besides the ones inside the genes variable.
a__: int =sorted({c for c in target if c not in genes} )
if not_in_genes_list:
a__: str =F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(__magic_name__ )
# Generate random starting population.
a__: Tuple =[]
for _ in range(__magic_name__ ):
population.append("".join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) )
# Just some logs to know what the algorithm is doing.
a__ , a__: Any =0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__magic_name__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
a__: Dict =[evaluate(__magic_name__ , __magic_name__ ) for item in population]
# Check if there is a matching evolution.
a__: Any =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoids regression of evolution.
a__: Optional[int] =population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__magic_name__ )
# Normalize population score to be between 0 and 1.
a__: List[str] =[
(item, score / len(__magic_name__ )) for item, score in population_score
]
# This is the selection step.
for i in range(__magic_name__ ):
population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(__magic_name__ ) > N_POPULATION:
break
if __name__ == "__main__":
__UpperCAmelCase = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCAmelCase = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 42 | 0 |
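# The two genetic operators driving the search above, restated as a minimal
# runnable sketch (single-point crossover and per-gene mutation; the 0.4
# default mirrors MUTATION_PROBABILITY):
import random

def crossover(parent_1: str, parent_2: str) -> tuple:
    cut = random.randint(0, len(parent_1) - 1)     # single crossover point
    child_1 = parent_1[:cut] + parent_2[cut:]
    child_2 = parent_2[:cut] + parent_1[cut:]
    return child_1, child_2

def mutate(child: str, genes: list, probability: float = 0.4) -> str:
    chars = list(child)
    if random.uniform(0, 1) < probability:         # flip one random gene
        chars[random.randint(0, len(chars) - 1)] = random.choice(genes)
    return "".join(chars)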
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase : Dict = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = ["ViTFeatureExtractor"]
UpperCAmelCase : int = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[int] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 |
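# The module above defers its heavy imports until first attribute access. A
# bare-bones sketch of that lazy-module idea (simplified; the real _LazyModule
# also tracks module specs and import errors):
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, item):
        if item not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        submodule = importlib.import_module("." + self._name_to_module[item], self.__name__)
        return getattr(submodule, item)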
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_a : str = StableUnCLIPPipeline
_a : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
_a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_a : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_a : Optional[Any] = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = 3_2
__lowerCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__lowerCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
__lowerCAmelCase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
__lowerCAmelCase = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL()
__lowerCAmelCase = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(_A )
else:
__lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
__lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase = pipe("anime turle" , generator=_A , output_type="np" )
__lowerCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 92 | 0 |
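# The deterministic-inputs helper above seeds a torch.Generator per device; on
# "mps" it falls back to seeding the default generator. The pattern in
# isolation:
import torch

def get_seeded_generator(seed: int, device: str = "cpu") -> torch.Generator:
    if device.startswith("mps"):
        return torch.manual_seed(seed)  # returns the (seeded) default generator
    return torch.Generator(device=device).manual_seed(seed)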
"""simple docstring"""
def _A ( _a : str ):
"""simple docstring"""
A = [int(i ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
return len(octets ) == 4 and all(0 <= int(octet ) <= 2_5_5 for octet in octets )
if __name__ == "__main__":
UpperCAmelCase =input().strip()
UpperCAmelCase ="valid" if is_ip_va_address_valid(ip) else "invalid"
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 77 |
"""simple docstring"""
import random
def _A ( _a : list , _a : Any ):
"""simple docstring"""
A , A , A = [], [], []
for element in data:
if element < pivot:
less.append(_a )
elif element > pivot:
greater.append(_a )
else:
equal.append(_a )
return less, equal, greater
def _A ( _a : list , _a : int ):
"""simple docstring"""
if index >= len(_a ) or index < 0:
return None
A = items[random.randint(0 , len(_a ) - 1 )]
A = 0
A , A , A = _partition(_a , _a )
A = len(_a )
A = len(_a )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_a , _a )
# must be in larger
else:
return quick_select(_a , index - (m + count) )
| 77 | 1 |
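# Quickselect above returns the index-th smallest element in expected O(n)
# time. Assuming the selection routine is exposed as quick_select(items, index)
# (an assumed name for the second function above), the median follows directly:
def median(items: list) -> float:
    mid = len(items) // 2
    if len(items) % 2 == 1:
        return quick_select(items, mid)
    return (quick_select(items, mid - 1) + quick_select(items, mid)) / 2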
'''simple docstring'''
a__ : List[Any] ={0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
a__ : Optional[int] ={0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def lowercase__ ( __lowercase : dict[int, list[int]] , __lowercase : int , __lowercase : list[bool] ) -> list[int]:
"""simple docstring"""
__UpperCamelCase = True
__UpperCamelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__lowercase , __lowercase , __lowercase )
order.append(__lowercase )
return order
def lowercase__ ( __lowercase : dict[int, list[int]] , __lowercase : int , __lowercase : list[bool] ) -> list[int]:
"""simple docstring"""
__UpperCamelCase = True
__UpperCamelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__lowercase , __lowercase , __lowercase )
return component
def lowercase__ ( __lowercase : dict[int, list[int]] ) -> list[list[int]]:
"""simple docstring"""
__UpperCamelCase = len(__lowercase ) * [False]
__UpperCamelCase = {vert: [] for vert in range(len(__lowercase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__lowercase )
__UpperCamelCase = []
for i, was_visited in enumerate(__lowercase ):
if not was_visited:
order += topology_sort(__lowercase , __lowercase , __lowercase )
__UpperCamelCase = []
__UpperCamelCase = len(__lowercase ) * [False]
for i in range(len(__lowercase ) ):
__UpperCamelCase = order[len(__lowercase ) - i - 1]
if not visited[vert]:
__UpperCamelCase = find_components(__lowercase , __lowercase , __lowercase )
components_list.append(__lowercase )
return components_list
| 53 |
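# The second pass above runs on the transposed graph. That reversal step,
# isolated into its own helper:
def transpose(graph: dict) -> dict:
    reversed_graph = {vertex: [] for vertex in graph}
    for vertex, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vertex)   # flip each edge
    return reversed_graph

# transpose({0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []})
# -> {0: [1], 1: [2], 2: [0], 3: [0], 4: [3]}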
'''simple docstring'''
a__ : Optional[Any] =256
# Modulus to hash a string
a__ : Dict =1_000_003
def lowercase__ ( __lowercase : str , __lowercase : str ) -> bool:
"""simple docstring"""
__UpperCamelCase = len(__lowercase )
__UpperCamelCase = len(__lowercase )
if p_len > t_len:
return False
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 1
# Calculating the hash of the pattern and of the first substring of the text
for i in range(__lowercase ):
__UpperCamelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCamelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCamelCase = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the rolling hash of the next window (https://en.wikipedia.org/wiki/Rolling_hash)
__UpperCamelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def lowercase__ ( ) -> None:
"""simple docstring"""
__UpperCamelCase = 'abc1abc12'
__UpperCamelCase = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
__UpperCamelCase = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(__lowercase , __lowercase ) and not rabin_karp(__lowercase , __lowercase )
# Test 2)
__UpperCamelCase = 'ABABX'
__UpperCamelCase = 'ABABZABABYABABX'
assert rabin_karp(__lowercase , __lowercase )
# Test 3)
__UpperCamelCase = 'AAAB'
__UpperCamelCase = 'ABAAAAAB'
assert rabin_karp(__lowercase , __lowercase )
# Test 4)
__UpperCamelCase = 'abcdabcy'
__UpperCamelCase = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(__lowercase , __lowercase )
# Test 5)
__UpperCamelCase = 'Lü'
__UpperCamelCase = 'Lüsai'
assert rabin_karp(__lowercase , __lowercase )
__UpperCamelCase = 'Lue'
assert not rabin_karp(__lowercase , __lowercase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 53 | 1 |
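# The window update above is the classic rolling hash: with base B and modulus
# M, H(i+1) = ((H(i) - ord(text[i]) * B**(m-1)) * B + ord(text[i+m])) mod M.
# Just that update step, restated standalone (constants mirror the ones above):
ROLL_BASE, ROLL_MODULUS = 256, 1_000_003

def roll(window_hash: int, outgoing: str, incoming: str, top_power: int) -> int:
    # top_power is ROLL_BASE ** (window_len - 1) % ROLL_MODULUS
    return ((window_hash - ord(outgoing) * top_power) * ROLL_BASE + ord(incoming)) % ROLL_MODULUS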
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__UpperCamelCase = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__UpperCamelCase = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__UpperCamelCase = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def lowercase (SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple:
return float((preds == labels).mean() )
def lowercase (SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple="binary" ) -> List[str]:
SCREAMING_SNAKE_CASE = simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = float(fa_score(y_true=SCREAMING_SNAKE_CASE_ , y_pred=SCREAMING_SNAKE_CASE_ , average=SCREAMING_SNAKE_CASE_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = {}
for id_pred, label in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
SCREAMING_SNAKE_CASE = id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
SCREAMING_SNAKE_CASE = [(pred, label)]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
for question, preds_labels in question_map.items():
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = zip(*SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = fa_score(y_true=SCREAMING_SNAKE_CASE_ , y_pred=SCREAMING_SNAKE_CASE_ , average='macro' )
fas.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = int(sum(pred == label for pred, label in preds_labels ) == len(SCREAMING_SNAKE_CASE_ ) )
ems.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = float(sum(SCREAMING_SNAKE_CASE_ ) / len(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE = sum(SCREAMING_SNAKE_CASE_ ) / len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = float(fa_score(y_true=SCREAMING_SNAKE_CASE_ , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    '''simple docstring'''

    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format='numpy' if self.config_name not in ('record', 'multirc') else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value('int64'),
                        "query": datasets.Value('int64'),
                    },
                    "prediction_text": datasets.Value('string'),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value('int64'),
                        "query": datasets.Value('int64'),
                    },
                    "answers": datasets.Sequence(datasets.Value('string')),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value('int64'),
                        "paragraph": datasets.Value('int64'),
                        "question": datasets.Value('int64'),
                    },
                    "prediction": datasets.Value('int64'),
                },
                "references": datasets.Value('int64'),
            }
        else:
            return {
                "predictions": datasets.Value('int64'),
                "references": datasets.Value('int64'),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg='macro')
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
| 38 |
"""simple docstring"""
from __future__ import annotations
def all_unique(values: list[int]) -> bool:
    """Return True if every element of the list is distinct."""
    return len(set(values)) == len(values)
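# e.g. all_unique([1, 2, 3]) -> True; all_unique([1, 2, 2]) -> False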
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 | 1 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor into the matching attribute of the HF model."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model):
    """Walk the fairseq state dict and map every tensor into the HF WavLM model."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor tensor, keyed by layer id and type id."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak the original WavLM weights into the transformers design."""
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
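# Example invocation (file names are illustrative, not from the original source):
#     python convert_wavlm_checkpoint.py \
#         --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base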
| 31 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 304 | 0 |
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError('''math domain error''')
    if num > 171.5:
        raise OverflowError('''math range error''')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('''num must be an integer or a half-integer''')
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
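    # Γ(n) equals (n - 1)! for positive integers, e.g. Γ(5) = 4! = 24
    assert gamma(5) == 24.0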
if __name__ == "__main__":
from doctest import testmod
testmod()
lowercase__ =1.0
while num:
lowercase__ =float(input('Gamma of: '))
print(F"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 90 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
    'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname: str, version: str, pattern: str):
    """Update the version in one file, using the matching REPLACE_PATTERNS entry."""
    with open(fname, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''', version)
    code = re_pattern.sub(replace, code)
    with open(fname, '''w''', encoding='''utf-8''', newline='''\n''') as f:
        f.write(code)
def update_version_in_examples(version: str):
    """Update the version in every actively maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''')
        if "legacy" in directories:
            directories.remove('''legacy''')
        for fname in fnames:
            if fname.endswith('''.py'''):
                update_version_in_file(os.path.join(folder, fname), version, pattern='''examples''')
def global_version_update(version: str, patch: bool = False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links from the main doc to the stable doc in the README model list."""
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('''1.'''):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''',
                '''https://huggingface.co/docs/transformers/model_doc''',
            )
        index += 1
    with open(README_FILE, '''w''', encoding='''utf-8''', newline='''\n''') as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES['''init'''], '''r''') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''')
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''')
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('Nothing to do after a patch :-)')
    else:
        post_release_work()
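# Typical invocations (an assumption; run from the repository root):
#     python utils/release.py                 # prepare the next minor release
#     python utils/release.py --patch         # prepare a patch release
#     python utils/release.py --post_release  # bump back to a dev version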
| 90 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar('_T')
class Queue(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"""Queue({tuple(self._stack2[::-1] + self._stack1)})"""

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Refill the output stack from the input stack only when it is empty,
        # which makes dequeues amortized O(1).
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("""Queue is empty""")
        return self._stack2.pop()
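# Usage sketch: FIFO order despite the two-stack internals.
#     q = Queue([1, 2]); q.put(3); q.get()  # -> 1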
if __name__ == "__main__":
from doctest import testmod
testmod()
| 294 | 0 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Difference between the hypothesis value and the actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)
def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: theta_0 + sum_i theta_i * x_i."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Actual output for the example, from the train or test set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the example, from the train or test set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum of the cost-derivative terms over the first `end` training examples."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """Derivative of the cost with respect to parameter `index` (-1 for the bias)."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(('Number of iterations:', j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(('Actual output value:', output(i, 'test')))
        print(('Hypothesis output:', calculate_hypothesis_value(i, 'test')))
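# Each update is batch gradient descent over all m training examples:
#     theta_i <- theta_i - LEARNING_RATE * dJ/dtheta_i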
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 357 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # Removed: 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
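# The invariant these tests pin down, in sketch form: every torch ".bin" weight file
# (optionally carrying a ".{variant}." infix) needs a ".safetensors" counterpart.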
| 104 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ['LayoutLMv3FeatureExtractor']
    _import_structure["image_processing_layoutlmv3"] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 15 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
a :Optional[int] = ["text", "image", "audio"]
def create_inputs(input_types):
    """Build one dummy input per requested type (text, image, or audio)."""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("""Text input""")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""")) / """000000039769.png""").resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(F'''Invalid type requested: {input_type}''')
    return inputs
def output_types(outputs):
    """Map each produced output back to its type name."""
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("""text""")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("""image""")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("""audio""")
        else:
            raise ValueError(F'''Invalid output: {output}''')
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, """inputs"""))
        self.assertTrue(hasattr(self.tool, """outputs"""))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, """description"""))
        self.assertTrue(hasattr(self.tool, """default_checkpoint"""))
        self.assertTrue(self.tool.description.startswith("""This is a tool that"""))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 132 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    # Current minimum recommendation is a 2048-bit group (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('''Unsupported Group''')
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('''Invalid public key''')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('''Invalid public key''')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
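# Usage sketch: two parties derive the same shared secret from exchanged public keys.
#     alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#     assert alice.generate_shared_key(bob.generate_public_key()) == \
#         bob.generate_shared_key(alice.generate_public_key())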
if __name__ == "__main__":
import doctest
doctest.testmod()
| 265 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 265 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """
    Construct a Reformer tokenizer, backed by SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
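# Usage sketch (checkpoint id taken from the map above):
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tokenizer("Crime and Punishment")["input_ids"]
#     text = tokenizer.decode(ids)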
| 217 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernel
        :param size_p1: pooling size
        :param bp_num1: units number of the flatten layer
        :param bp_num2: units number of the hidden layer
        :param bp_num3: units number of the output layer
        :param rate_w: weight learning rate
        :param rate_t: threshold learning rate
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save all model parameters with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read a saved model back into a CNN instance
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_weight = model_dic.get("rate_weight")
        rate_thre = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_weight, rate_thre)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three-dimension data to a one-dimension array
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding a matrix to a one-row array
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # calculate the gradient from the data slice of the pooling layer
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]))
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
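    # Added sanity sketch, not part of the original class: the gradient steps
    # above repeatedly use out * (1 - out), the derivative of the logistic
    # sigmoid behind self.sig. A quick numerical check of that identity:
    import numpy as np

    def _sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    xs = np.linspace(-4.0, 4.0, 9)
    analytic = _sigmoid(xs) * (1.0 - _sigmoid(xs))
    numeric = (_sigmoid(xs + 1e-6) - _sigmoid(xs - 1e-6)) / 2e-6
    assert np.allclose(analytic, numeric, atol=1e-6)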
| 217 | 1 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
UpperCAmelCase = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
UpperCAmelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def lowercase ( a__ : str ) -> dict[str, int]:
_UpperCamelCase = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def lowercase ( a__ : tuple ) -> str:
return x[0]
def lowercase ( a__ : str ) -> str:
_UpperCamelCase = get_letter_count(a__ )
_UpperCamelCase = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(a__ )
_UpperCamelCase = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=a__ )
_UpperCamelCase = ''''''.join(freq_to_letter[freq] )
_UpperCamelCase = list(freq_to_letter_str.items() )
freq_pairs.sort(key=a__ , reverse=a__ )
_UpperCamelCase = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(a__ )
def lowercase ( a__ : str ) -> int:
_UpperCamelCase = get_frequency_order(a__ )
_UpperCamelCase = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
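    # Added usage sketch, not part of the original module; the function names
    # are pinned by the call sites above (get_item_at_index_zero follows the
    # conventional upstream name).
    sample = "Hello World! This is an example of English text."
    assert get_letter_count(sample)["L"] == 5
    print("frequency order:", get_frequency_order(sample))
    print("match score (out of 12):", english_freq_match_score(sample))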
| 54 | """simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return the primes up to and including num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
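    # Illustrative check (added, not in the original file): primes up to 25.
    assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]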
| 54 | 1 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
lowercase__ : Any = []
lowercase__ : Optional[int] = []
lowercase__ : Tuple = []
for rt in rc.restypes:
lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14)
restype_atomaa_to_atomaa_list.append([0] * 37)
restype_atomaa_mask_list.append([0.0] * 14)
lowercase__ : Union[str, Any] = torch.tensor(
_lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
lowercase__ : str = torch.tensor(
_lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
lowercase__ : List[str] = torch.tensor(
_lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , )
lowercase__ : str = protein["aatype"].to(torch.long)
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype]
lowercase__ : str = restype_atomaa_mask[protein_aatype]
lowercase__ : List[Any] = residx_atomaa_mask
lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype]
lowercase__ : str = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device)
for restype, restype_letter in enumerate(rc.restypes):
lowercase__ : Tuple = rc.restype_atoa[restype_letter]
lowercase__ : List[Any] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowercase__ : Optional[int] = rc.atom_order[atom_name]
lowercase__ : Tuple = 1
lowercase__ : Dict = restype_atomaa_mask[protein_aatype]
lowercase__ : Any = residx_atomaa_mask
return protein
def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
lowercase__ : Tuple = tree_map(lambda _lowerCamelCase: torch.tensor(_lowerCamelCase , device=batch["aatype"].device) , _lowerCamelCase , np.ndarray)
lowercase__ : List[str] = tensor_tree_map(lambda _lowerCamelCase: np.array(_lowerCamelCase) , make_atomaa_masks(_lowerCamelCase))
return out
| 87 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_UpperCAmelCase = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_UpperCAmelCase = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_UpperCAmelCase = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[Any]="binary" , lowerCAmelCase : int=None , lowerCAmelCase : str="warn" , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =recall_score(
lowerCAmelCase , lowerCAmelCase , labels=lowerCAmelCase , pos_label=lowerCAmelCase , average=lowerCAmelCase , sample_weight=lowerCAmelCase , zero_division=lowerCAmelCase , )
return {"recall": float(lowerCAmelCase ) if score.size == 1 else score}
| 173 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : Tuple = CycleDiffusionPipeline
__snake_case : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
__snake_case : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
__snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
__snake_case : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__snake_case : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowerCAmelCase_ : Tuple = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=10_00 , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCAmelCase_ : Optional[Any] = CLIPTextModel(lowerCamelCase_ )
lowerCAmelCase_ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ : Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A ( self : str , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any]=0 ):
lowerCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCAmelCase_ : List[str] = image / 2 + 0.5
if str(lowerCamelCase_ ).startswith("""mps""" ):
lowerCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase_ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def A ( self : str ):
lowerCAmelCase_ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase_ : List[Any] = CycleDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase_ : Optional[int] = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase_ : List[str] = pipe(**lowerCamelCase_ )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCAmelCase_ : Any = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A ( self : Tuple ):
lowerCAmelCase_ : Any = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowerCamelCase_ , """half""" ):
lowerCAmelCase_ : int = module.half()
lowerCAmelCase_ : Optional[Any] = CycleDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase_ : int = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase_ : Optional[int] = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase_ : Dict = pipe(**lowerCamelCase_ )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : str = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCAmelCase_ : Any = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A ( self : Union[str, Any] ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def A ( self : int ):
return super().test_inference_batch_single_identical()
@skip_mps
def A ( self : Any ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def A ( self : Optional[Any] ):
return super().test_save_load_optional_components()
@skip_mps
def A ( self : Dict ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def A ( self : str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : str ):
lowerCAmelCase_ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
lowerCAmelCase_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
lowerCAmelCase_ : str = init_image.resize((5_12, 5_12) )
lowerCAmelCase_ : int = """CompVis/stable-diffusion-v1-4"""
lowerCAmelCase_ : List[str] = DDIMScheduler.from_pretrained(lowerCamelCase_ , subfolder="""scheduler""" )
lowerCAmelCase_ : str = CycleDiffusionPipeline.from_pretrained(
lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ : List[Any] = """A black colored car"""
lowerCAmelCase_ : Optional[Any] = """A blue colored car"""
lowerCAmelCase_ : List[str] = torch.manual_seed(0 )
lowerCAmelCase_ : str = pipe(
prompt=lowerCamelCase_ , source_prompt=lowerCamelCase_ , image=lowerCamelCase_ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCamelCase_ , output_type="""np""" , )
lowerCAmelCase_ : Optional[int] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def A ( self : str ):
lowerCAmelCase_ : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
lowerCAmelCase_ : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
lowerCAmelCase_ : List[Any] = init_image.resize((5_12, 5_12) )
lowerCAmelCase_ : Dict = """CompVis/stable-diffusion-v1-4"""
lowerCAmelCase_ : List[Any] = DDIMScheduler.from_pretrained(lowerCamelCase_ , subfolder="""scheduler""" )
lowerCAmelCase_ : int = CycleDiffusionPipeline.from_pretrained(lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ : str = """A black colored car"""
lowerCAmelCase_ : int = """A blue colored car"""
lowerCAmelCase_ : Dict = torch.manual_seed(0 )
lowerCAmelCase_ : Optional[int] = pipe(
prompt=lowerCamelCase_ , source_prompt=lowerCamelCase_ , image=lowerCamelCase_ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCamelCase_ , output_type="""np""" , )
lowerCAmelCase_ : str = output.images
assert np.abs(image - expected_image ).max() < 2e-2
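# Minimal usage sketch (added; it mirrors the slow test above rather than an
# official example, and the model id, scheduler and call arguments are taken
# from that test):
#
#     scheduler = DDIMScheduler.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", subfolder="scheduler"
#     )
#     pipe = CycleDiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", scheduler=scheduler, safety_checker=None
#     )
#     out = pipe(
#         prompt="A blue colored car",
#         source_prompt="A black colored car",
#         image=init_image,  # a 512x512 PIL image
#         num_inference_steps=100,
#         eta=0.1,
#         strength=0.85,
#         guidance_scale=3,
#         source_guidance_scale=1,
#         output_type="np",
#     )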
| 354 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed-size doubly linked ring."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()

        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
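    # Added usage sketch, not part of the original file; enqueue/dequeue/first
    # and the class name follow the conventional upstream implementation (the
    # attribute names are pinned by the code itself).
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.dequeue() == "a"
    assert queue.first() == "b"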
| 28 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
A : Union[str, Any] = logging.getLogger(__name__)
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ = argparse.ArgumentParser(
description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
parser.add_argument(
"""--dataset_name""" , type=__a , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=__a , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=__a , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=__a , default=1_0_0_0 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=__a , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=__a , type=__a , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=__a , default=5_1_2 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=__a , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
A__ = parser.parse_args()
return args
def __lowerCamelCase ( __a :List[str] ) -> Any:
"""simple docstring"""
def fn(__a :Optional[int] ):
return tokenizer(examples["""text"""] )
return fn
def __lowerCamelCase ( __a :str ) -> Any:
"""simple docstring"""
A__ = []
for i in range(len(tokenized_data["""input_ids"""] ) ):
A__ = {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
}
A__ = tf.train.Features(feature=__a )
A__ = tf.train.Example(features=__a )
A__ = example.SerializeToString()
records.append(__a )
return records
def __lowerCamelCase ( __a :str ) -> Tuple:
"""simple docstring"""
A__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
A__ = min(len(__a ) , args.limit )
A__ = dataset.select(range(__a ) )
print(F'Limiting the dataset to {args.limit} entries.' )
A__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
A__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(__a ):
os.makedirs(__a )
else:
A__ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
A__ = tokenize_function(__a )
A__ = dataset.map(__a , batched=__a , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(__a :str ):
# Concatenate all texts.
A__ = {k: sum(examples[k] , [] ) for k in examples.keys()}
A__ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
A__ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
A__ = {
k: [t[i : i + args.max_length] for i in range(0 , __a , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
A__ = dataset_tokenized.map(__a , batched=__a , batch_size=1_0_0_0 , num_proc=4 )
A__ = 0
A__ = 0
for shard in range(0 , len(__a ) , args.shard_size ):
A__ = grouped_dataset[shard : shard + args.shard_size]
A__ = len(dataset_snapshot["""input_ids"""] )
A__ = os.path.join(__a , F'dataset-{shard_count}-{records_containing}.tfrecord' )
A__ = get_serialized_examples(__a )
with tf.io.TFRecordWriter(__a ) as out_file:
for i in range(len(__a ) ):
A__ = serialized_examples[i]
out_file.write(__a )
print("""Wrote file {} containing {} records""".format(__a , __a ) )
shard_count += 1
total_records += records_containing
with open(F'split-{args.split}-records-count.txt' , """w""" ) as f:
print(F'Total {args.split} records: {total_records}' , file=__a )
if __name__ == "__main__":
A : str = parse_args()
main(args)
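# Read-back sketch (added, not part of the script; the feature spec mirrors
# the writer above, and the shard filename is illustrative):
#
#     feature_description = {
#         "input_ids": tf.io.VarLenFeature(tf.int64),
#         "attention_mask": tf.io.VarLenFeature(tf.int64),
#     }
#     ds = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])
#     for example in ds.map(
#         lambda rec: tf.io.parse_single_example(rec, feature_description)
#     ).take(1):
#         print(tf.sparse.to_dense(example["input_ids"]))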
| 274 |
from math import ceil


def solution(n: int = 1001) -> int:
    """Sum of the numbers on both diagonals of an n x n number spiral."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
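    # Worked check (added): for a 5x5 spiral the diagonals hold
    # 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.
    assert solution(5) == 101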
| 274 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
a : List[str] = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class _a ( _lowerCAmelCase ):
A = '''albert'''
def __init__(self, SCREAMING_SNAKE_CASE_=30000, SCREAMING_SNAKE_CASE_=128, SCREAMING_SNAKE_CASE_=4096, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=16384, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_="gelu_new", SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=1E-12, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_="absolute", SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_, bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = vocab_size
UpperCAmelCase_: List[Any] = embedding_size
UpperCAmelCase_: Union[str, Any] = hidden_size
UpperCAmelCase_: Dict = num_hidden_layers
UpperCAmelCase_: Union[str, Any] = num_hidden_groups
UpperCAmelCase_: List[str] = num_attention_heads
UpperCAmelCase_: str = inner_group_num
UpperCAmelCase_: Any = hidden_act
UpperCAmelCase_: str = intermediate_size
UpperCAmelCase_: Tuple = hidden_dropout_prob
UpperCAmelCase_: Any = attention_probs_dropout_prob
UpperCAmelCase_: List[str] = max_position_embeddings
UpperCAmelCase_: Tuple = type_vocab_size
UpperCAmelCase_: Optional[int] = initializer_range
UpperCAmelCase_: Optional[Any] = layer_norm_eps
UpperCAmelCase_: List[str] = classifier_dropout_prob
UpperCAmelCase_: Optional[int] = position_embedding_type
class _a ( _lowerCAmelCase ):
@property
def __snake_case (self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_: Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase_: List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 82 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class _a ( _lowerCAmelCase ):
A = '''encodec'''
def __init__(self, SCREAMING_SNAKE_CASE_=[1.5, 3.0, 6.0, 1_2.0, 2_4.0], SCREAMING_SNAKE_CASE_=24000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=128, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=[8, 5, 4, 2], SCREAMING_SNAKE_CASE_="weight_norm", SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="reflect", SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=1.0, SCREAMING_SNAKE_CASE_=1024, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
UpperCAmelCase_: List[Any] = target_bandwidths
UpperCAmelCase_: str = sampling_rate
UpperCAmelCase_: Any = audio_channels
UpperCAmelCase_: List[str] = normalize
UpperCAmelCase_: List[Any] = chunk_length_s
UpperCAmelCase_: List[Any] = overlap
UpperCAmelCase_: Any = hidden_size
UpperCAmelCase_: str = num_filters
UpperCAmelCase_: Any = num_residual_layers
UpperCAmelCase_: int = upsampling_ratios
UpperCAmelCase_: Tuple = norm_type
UpperCAmelCase_: Union[str, Any] = kernel_size
UpperCAmelCase_: str = last_kernel_size
UpperCAmelCase_: Union[str, Any] = residual_kernel_size
UpperCAmelCase_: str = dilation_growth_rate
UpperCAmelCase_: int = use_causal_conv
UpperCAmelCase_: int = pad_mode
UpperCAmelCase_: List[Any] = compress
UpperCAmelCase_: Dict = num_lstm_layers
UpperCAmelCase_: List[Any] = trim_right_ratio
UpperCAmelCase_: List[Any] = codebook_size
UpperCAmelCase_: List[Any] = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase_: Optional[Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
super().__init__(**SCREAMING_SNAKE_CASE_ )
@property
def __snake_case (self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __snake_case (self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __snake_case (self ) -> int:
UpperCAmelCase_: Optional[int] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __snake_case (self ) -> int:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
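# Worked numbers (added note): with the defaults above, the hop length is
# prod([8, 5, 4, 2]) = 320, so at 24 kHz the frame rate is
# ceil(24000 / 320) = 75 frames/s, and the last property (num_quantizers in
# upstream EnCodec) gives 1000 * 24.0 // (75 * 10) = 32 codebooks.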
| 82 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
__A =[
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def lowerCamelCase_ ( lowerCamelCase__ ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
lowerCamelCase_ = k.replace(lowerCamelCase__ , lowerCamelCase__ )
if k.startswith("encoder" ):
lowerCamelCase_ = k.replace(".attn" , ".self_attn" )
lowerCamelCase_ = k.replace("norm1" , "self_attn_layer_norm" )
lowerCamelCase_ = k.replace("norm2" , "final_layer_norm" )
elif k.startswith("decoder" ):
lowerCamelCase_ = k.replace("norm1" , "self_attn_layer_norm" )
lowerCamelCase_ = k.replace("norm2" , "encoder_attn_layer_norm" )
lowerCamelCase_ = k.replace("norm3" , "final_layer_norm" )
return k
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
lowerCamelCase_ = sd.pop(lowerCamelCase__ )
lowerCamelCase_ = k.replace("layernorm_embedding" , "layer_norm" )
assert new_k not in sd
lowerCamelCase_ = v
__A =['''START''']
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )
lowerCamelCase_ = model["model"]
lowerCamelCase_ = BlenderbotConfig.from_json_file(lowerCamelCase__ )
lowerCamelCase_ = BlenderbotForConditionalGeneration(lowerCamelCase__ )
lowerCamelCase_ = m.model.state_dict().keys()
lowerCamelCase_ = []
lowerCamelCase_ = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
lowerCamelCase_ = rename_state_dict_key(lowerCamelCase__ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
lowerCamelCase_ = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(lowerCamelCase__ )
m.model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
m.half()
m.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
__A =parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
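# Example invocation (added; the flags and defaults come from the argparse
# setup above, while the script filename is assumed):
#
#     python convert_blenderbot_checkpoint.py \
#         --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot \
#         --hf_config_json blenderbot-3b-config.json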
| 19 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
lowerCamelCase_ = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowerCamelCase_ = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if not mpi_options.get("sagemaker_mpi_enabled" , lowerCamelCase__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , lowercase , )
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
lowerCamelCase_ = torch.device("cpu" )
lowerCamelCase_ = 0
elif is_sagemaker_model_parallel_available():
lowerCamelCase_ = smp.local_rank()
lowerCamelCase_ = torch.device("cuda" , lowercase )
lowerCamelCase_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowerCamelCase_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowerCamelCase_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
if device.type == "cuda":
torch.cuda.set_device(lowercase )
return device
@property
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return False
| 19 | 1 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a : Union[str, Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
a : Any = importlib.util.spec_from_file_location(
'transformers',
os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
a : int = spec.loader.load_module()
a : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a : str = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
a : str = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def __magic_name__ ( ) -> Any:
'''simple docstring'''
snake_case_ = []
for config_class in list(CONFIG_MAPPING.values() ):
snake_case_ = False
# source code of `config_class`
snake_case_ = inspect.getsource(__UpperCAmelCase )
snake_case_ = _re_checkpoint.findall(__UpperCAmelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
snake_case_ ,snake_case_ = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ = F"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
snake_case_ = True
break
snake_case_ = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
snake_case_ = '''\n'''.join(sorted(__UpperCAmelCase ) )
raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
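# Illustrative check (added): the pattern above extracts (name, link) pairs
# from config docstrings, e.g.
#
#     >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#     [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]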
| 358 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : Optional[Any] = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class a ( _lowerCamelCase ):
snake_case_ = "xlm-roberta-xl"
def __init__( self : Optional[Any] , lowercase_ : Optional[Any]=25_0880 , lowercase_ : Tuple=2560 , lowercase_ : str=36 , lowercase_ : List[str]=32 , lowercase_ : Optional[Any]=1_0240 , lowercase_ : List[str]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=514 , lowercase_ : Any=1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : Dict=1e-05 , lowercase_ : List[Any]=1 , lowercase_ : str=0 , lowercase_ : Dict=2 , lowercase_ : Optional[Any]="absolute" , lowercase_ : str=True , lowercase_ : str=None , **lowercase_ : Tuple , ):
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = use_cache
snake_case_ = classifier_dropout
class a ( _lowerCamelCase ):
@property
def A_ ( self : Optional[Any] ):
if self.task == "multiple-choice":
snake_case_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
snake_case_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 72 | 0 |
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
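    # Expected output (added note): C(5, 3) = 10 combinations, printed one per
    # line, starting with "10 20 30" and ending with "30 40 50".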
| 20 |
import argparse
from collections import defaultdict
import yaml
A : str = '''docs/source/en/_toctree.yml'''
def __lowerCamelCase ( __a :str ) -> List[Any]:
"""simple docstring"""
A__ = defaultdict(__a )
A__ = []
A__ = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(__a )
A__ = new_doc_list
A__ = [key for key, value in counts.items() if value > 1]
A__ = []
for duplicate_key in duplicates:
A__ = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
A__ = sorted(__a , key=lambda __a : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__a ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(__a )
# Sort
return overview_doc
def __lowerCamelCase ( __a :Any=False ) -> List[str]:
"""simple docstring"""
with open(__a , encoding="""utf-8""" ) as f:
A__ = yaml.safe_load(f.read() )
# Get to the API doc
A__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A__ = content[api_idx]["""sections"""]
# Then to the model doc
A__ = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
A__ = api_doc[scheduler_idx]["""sections"""]
A__ = clean_doc_toc(__a )
A__ = False
if new_scheduler_doc != scheduler_doc:
A__ = True
if overwrite:
A__ = new_scheduler_doc
if diff:
if overwrite:
A__ = api_doc
with open(__a , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def __lowerCamelCase ( __a :Optional[int]=False ) -> Dict:
"""simple docstring"""
with open(__a , encoding="""utf-8""" ) as f:
A__ = yaml.safe_load(f.read() )
# Get to the API doc
A__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A__ = content[api_idx]["""sections"""]
# Then to the model doc
A__ = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
A__ = False
A__ = api_doc[pipeline_idx]["""sections"""]
A__ = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
A__ = pipeline_doc["""section"""]
A__ = clean_doc_toc(__a )
if overwrite:
A__ = new_sub_pipeline_doc
new_pipeline_docs.append(__a )
# sort overall pipeline doc
A__ = clean_doc_toc(__a )
if new_pipeline_docs != pipeline_docs:
A__ = True
if overwrite:
A__ = new_pipeline_docs
if diff:
if overwrite:
A__ = api_doc
with open(__a , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
A : Tuple = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A : Optional[Any] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 274 | 0 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__UpperCamelCase : Optional[Any] = None
try:
import msvcrt
except ImportError:
__UpperCamelCase : str = None
try:
import fcntl
except ImportError:
__UpperCamelCase : Dict = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__UpperCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
__UpperCamelCase : Tuple = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
__UpperCamelCase : Optional[Any] = "3.0.12"
__UpperCamelCase : Optional[int] = None
def __A ( ) -> Dict:
global _logger
a = _logger or logging.getLogger(__name__ )
return _logger
class __lowerCAmelCase ( __magic_name__ ):
def __init__( self :Tuple , __magic_name__ :Optional[int] ):
'''simple docstring'''
a = lock_file
return None
def __str__( self :Any ):
'''simple docstring'''
a = F'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class __lowerCAmelCase :
def __init__( self :int , __magic_name__ :List[Any] ):
'''simple docstring'''
a = lock
return None
def __enter__( self :Any ):
'''simple docstring'''
return self.lock
def __exit__( self :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Dict , __magic_name__ :Optional[Any] ):
'''simple docstring'''
self.lock.release()
return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value (in seconds)."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent lock acquisition; implemented in subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent lock release; implemented in subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True if the object currently holds the file lock."""
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking() function to hard-lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock() function to hard-lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
"""Alias for the lock, which should be used for the current platform."""

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
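
# A minimal usage sketch of the platform lock selected above. The file name
# "example.lock" is an illustrative placeholder; any writable path works. The lock
# is re-entrant within a process, and acquire() raises Timeout once the given
# timeout elapses while another process holds the OS-level lock.
if __name__ == "__main__":
    lock = FileLock("example.lock", timeout=5)
    try:
        with lock:  # equivalent to lock.acquire() / lock.release()
            print(f"holding {lock.lock_file}")  # critical section
    except Timeout:
        print("another process is holding the lock")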
| 347 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 347 | 1 |
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
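
# A few illustrative conversions, following directly from the splitting and
# joining logic above:
#
#     >>> to_camel_case("this is a string")
#     'thisIsAString'
#     >>> to_snake_case("this is a string", upper=True)
#     'THIS_IS_A_STRING'
#     >>> to_kebab_case("this is a string", upper=False)
#     'this-is-a-string'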
if __name__ == "__main__":
__import__("doctest").testmod()
| 36 |
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for *bit_count* bits as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Build the Gray code sequence for *bit_count* bits as binary strings."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
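
# A small worked example of the functions above: each successive value differs
# from the previous one in exactly one bit, which is the defining Gray-code property.
#
#     >>> gray_code_sequence_string(2)
#     ['00', '01', '11', '10']
#     >>> gray_code(2)
#     [0, 1, 3, 2]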
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
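
# A hedged example invocation of this conversion script. All paths below are
# hypothetical placeholders, not files shipped with the script:
#
#     python convert_sew_checkpoint.py \
#         --checkpoint_path /path/to/sew_checkpoint.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./sew-converted \
#         --is_finetuned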
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
| 256 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" ELECTRA tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 256 | 1 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
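
# A minimal usage sketch. "bert-base-uncased" and the data_dir are placeholder choices,
# and the class itself is deprecated in favor of 🤗 Datasets, as the warning above says:
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#     dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
#     print(len(dataset), dataset.get_labels())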
| 52 |
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 305 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the ``_import_structure`` objects defined and the ``TYPE_CHECKING``
    objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
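
# For illustration, on an init that guards torch-only objects, parse_init returns a pair
# of dicts keyed by backend (this sample output is schematic, not taken from a real run):
#
#     import_dict_objects = {"none": ["SomeConfig"], "torch": ["SomeModel"]}
#     type_hint_objects = {"none": ["SomeConfig"], "torch": ["SomeModel"]}
#
# analyze_results() below then compares the two halves key by key.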
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
    both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 358 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 84 | 0 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))
    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer
    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data
    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
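
# For illustration, derived directly from the branching above ("bert-base-uncased"
# and "unc-nlp/frcnn-vg-finetuned" are just sample model ids):
#
#     hf_bucket_url("bert-base-uncased", "config.yaml")
#     -> "https://cdn.huggingface.co/bert-base-uncased-config.yaml"          (legacy: no "/" in id)
#     hf_bucket_url("unc-nlp/frcnn-vg-finetuned", "config.yaml", use_cdn=False)
#     -> "https://s3.amazonaws.com/models.huggingface.co/bert/unc-nlp/frcnn-vg-finetuned/config.yaml"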
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , ) -> Any:
if cache_dir is None:
snake_case_ = TRANSFORMERS_CACHE
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = str(_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
snake_case_ = None
if not local_files_only:
try:
snake_case_ = requests.head(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , timeout=_SCREAMING_SNAKE_CASE )
if response.status_code == 200:
snake_case_ = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
snake_case_ = url_to_filename(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# get cache path to put the file
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_SCREAMING_SNAKE_CASE ):
return cache_path
else:
snake_case_ = [
file
for file in fnmatch.filter(os.listdir(_SCREAMING_SNAKE_CASE ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(_SCREAMING_SNAKE_CASE ) > 0:
return os.path.join(_SCREAMING_SNAKE_CASE , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(_SCREAMING_SNAKE_CASE ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
snake_case_ = cache_path + """.lock"""
with FileLock(_SCREAMING_SNAKE_CASE ):
# If the download just completed while the lock was activated.
if os.path.exists(_SCREAMING_SNAKE_CASE ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
snake_case_ = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(_SCREAMING_SNAKE_CASE , """a+b""" ) as f:
yield f
snake_case_ = _resumable_file_manager
if os.path.exists(_SCREAMING_SNAKE_CASE ):
snake_case_ = os.stat(_SCREAMING_SNAKE_CASE ).st_size
else:
snake_case_ = 0
else:
snake_case_ = partial(tempfile.NamedTemporaryFile , dir=_SCREAMING_SNAKE_CASE , delete=_SCREAMING_SNAKE_CASE )
snake_case_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , _SCREAMING_SNAKE_CASE , temp_file.name , )
http_get(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_size=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , )
os.replace(temp_file.name , _SCREAMING_SNAKE_CASE )
snake_case_ = {"""url""": url, """etag""": etag}
snake_case_ = cache_path + """.json"""
with open(_SCREAMING_SNAKE_CASE , """w""" ) as meta_file:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return cache_path
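# A minimal sketch of the lock-then-recheck pattern used above, assuming the
# filelock package; paths are illustrative. Re-checking existence *inside* the
# lock is what stops a second process from re-downloading a finished file.
import os
from filelock import FileLock
def demo_locked_fetch(cache_path, fetch_fn):
    with FileLock(cache_path + ".lock"):
        if not os.path.exists(cache_path):  # may have appeared while we waited for the lock
            fetch_fn(cache_path)
    return cache_path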
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
snake_case_ = url.encode("""utf-8""" )
snake_case_ = shaaaa(_SCREAMING_SNAKE_CASE )
snake_case_ = url_hash.hexdigest()
if etag:
snake_case_ = etag.encode("""utf-8""" )
snake_case_ = shaaaa(_SCREAMING_SNAKE_CASE )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
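# Hedged illustration of the cache-filename scheme above; the obfuscated shaaaa
# almost certainly stands for hashlib.sha256, and the values are illustrative.
from hashlib import sha256
demo_url = "https://example.com/pytorch_model.bin"
demo_etag = '"0123abcd"'
demo_filename = sha256(demo_url.encode("utf-8")).hexdigest()
demo_filename += "." + sha256(demo_etag.encode("utf-8")).hexdigest()
# result: "<64-hex url hash>.<64-hex etag hash>", plus a ".h5" suffix for TF weights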
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , ) -> Any:
if cache_dir is None:
snake_case_ = TRANSFORMERS_CACHE
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = str(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = str(_SCREAMING_SNAKE_CASE )
if is_remote_url(_SCREAMING_SNAKE_CASE ):
# URL, so get it from the cache (downloading if necessary)
snake_case_ = get_from_cache(
_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
elif os.path.exists(_SCREAMING_SNAKE_CASE ):
# File, and it exists.
snake_case_ = url_or_filename
elif urlparse(_SCREAMING_SNAKE_CASE ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(_SCREAMING_SNAKE_CASE ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(_SCREAMING_SNAKE_CASE ) )
if extract_compressed_file:
if not is_zipfile(_SCREAMING_SNAKE_CASE ) and not tarfile.is_tarfile(_SCREAMING_SNAKE_CASE ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
snake_case_ , snake_case_ = os.path.split(_SCREAMING_SNAKE_CASE )
snake_case_ = output_file.replace(""".""" , """-""" ) + """-extracted"""
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.isdir(_SCREAMING_SNAKE_CASE ) and os.listdir(_SCREAMING_SNAKE_CASE ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
snake_case_ = output_path + """.lock"""
with FileLock(_SCREAMING_SNAKE_CASE ):
shutil.rmtree(_SCREAMING_SNAKE_CASE , ignore_errors=_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE )
if is_zipfile(_SCREAMING_SNAKE_CASE ):
with ZipFile(_SCREAMING_SNAKE_CASE , """r""" ) as zip_file:
zip_file.extractall(_SCREAMING_SNAKE_CASE )
zip_file.close()
elif tarfile.is_tarfile(_SCREAMING_SNAKE_CASE ):
snake_case_ = tarfile.open(_SCREAMING_SNAKE_CASE )
tar_file.extractall(_SCREAMING_SNAKE_CASE )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(_SCREAMING_SNAKE_CASE ) )
return output_path_extracted
return output_path
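# Hedged sketch of the extraction-directory naming used above:
# "./model.zip" is unpacked into "./model-zip-extracted/".
import os
def demo_extract_dir(output_path):
    output_dir, output_file = os.path.split(output_path)
    return os.path.join(output_dir, output_file.replace(".", "-") + "-extracted")
assert demo_extract_dir("./model.zip").endswith("model-zip-extracted")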
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="," ) -> Dict:
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE ) as f:
            snake_case_ = eval(f.read() )  # NOTE: eval on raw file contents — safe only for trusted local files
else:
snake_case_ = requests.get(_SCREAMING_SNAKE_CASE )
try:
            snake_case_ = req.json()  # parse the response object, not the requests module
except Exception:
snake_case_ = req.content.decode()
assert data is not None, "could not connect"
try:
snake_case_ = eval(_SCREAMING_SNAKE_CASE )
except Exception:
snake_case_ = data.split("""\n""" )
req.close()
return data
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = requests.get(_SCREAMING_SNAKE_CASE )
snake_case_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , """rb""" ) as stream:
snake_case_ = pkl.load(_SCREAMING_SNAKE_CASE )
snake_case_ = weights.pop("""model""" )
snake_case_ = {}
for k, v in model.items():
snake_case_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
if "running_var" in k:
snake_case_ = torch.tensor([0] )
snake_case_ = k.replace("""running_var""" , """num_batches_tracked""" )
snake_case_ = zero
return new
def _a ( ) -> str:
print(f"""{os.path.abspath(os.path.join(_SCREAMING_SNAKE_CASE , os.pardir ) )}/demo.ipynb""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="RGB" ) -> List[Any]:
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
snake_case_ = cva.imread(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = get_image_from_url(_SCREAMING_SNAKE_CASE )
assert img is not None, f"""could not connect to: {im}"""
snake_case_ = cva.cvtColor(_SCREAMING_SNAKE_CASE , cva.COLOR_BGR2RGB )
if input_format == "RGB":
snake_case_ = img[:, :, ::-1]
return img
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 ) -> int:
return (images[i : i + batch] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ))
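# Hedged usage sketch for the batching generator above: successive slices of
# length batch, with a shorter final slice when the length is not divisible.
demo_images = list(range(5))
demo_batches = [demo_images[i : i + 2] for i in range(0, len(demo_images), 2)]
assert demo_batches == [[0, 1], [2, 3], [4]]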
| 347 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Tuple = SpeechTaTokenizer
__lowercase: int = False
__lowercase: List[str] = True
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ )
snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ )
snake_case_ = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = """this is a test"""
snake_case_ = """this is a test"""
return input_text, output_text
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ )
snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = """<pad>"""
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCAmelCase_ ) , 81 )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
snake_case_ = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
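# A minimal, hedged sketch of the character-level scheme exercised above: word
# boundaries become the SentencePiece underline marker and every character is
# its own token, so unseen characters degrade gracefully to <unk>.
SPIECE_UNDERLINE_DEMO = "\u2581"
def demo_char_tokenize(text):
    return [SPIECE_UNDERLINE_DEMO if ch == " " else ch for ch in " " + text]
assert demo_char_tokenize("This is") == [SPIECE_UNDERLINE_DEMO, "T", "h", "i", "s", SPIECE_UNDERLINE_DEMO, "i", "s"]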
| 347 | 1 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Any = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Union[str, Any] , __A : List[Any] ) -> int:
"""simple docstring"""
a_ : str = os.path.abspath(__A )
logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
a_ : Dict = tf.train.list_variables(__A )
a_ : List[str] = []
a_ : Union[str, Any] = []
a_ : Any = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
a_ : Optional[int] = full_name.split('/' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(F"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
a_ : Optional[Any] = name[1:]
# figure out how many levels deep the name is
a_ : List[Any] = 0
for _name in name:
if _name.startswith('layer_with_weights' ):
depth += 1
else:
break
layer_depth.append(__A )
# read data
a_ : int = tf.train.load_variable(__A , __A )
names.append('/'.join(__A ) )
arrays.append(__A )
logger.info(F"""Read a total of {len(__A ):,} layers""" )
# Sanity check
if len(set(__A ) ) != 1:
raise ValueError(F"""Found layer names with different depths (layer depth {list(set(__A ) )})""" )
a_ : Union[str, Any] = list(set(__A ) )[0]
if layer_depth != 1:
raise ValueError(
'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
' heads.' )
# convert layers
logger.info('Converting weights...' )
for full_name, array in zip(__A , __A ):
a_ : Union[str, Any] = full_name.split('/' )
a_ : Dict = model
a_ : List[str] = []
for i, m_name in enumerate(__A ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('layer_with_weights' ):
a_ : Optional[int] = int(m_name.split('-' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['embeddings', 'LayerNorm'] )
a_ : str = getattr(__A , 'embeddings' )
a_ : List[str] = getattr(__A , 'LayerNorm' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['encoder', 'layer', str(layer_num - 4 )] )
a_ : Optional[Any] = getattr(__A , 'encoder' )
a_ : List[str] = getattr(__A , 'layer' )
a_ : Optional[Any] = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['pooler', 'dense'] )
a_ : int = getattr(__A , 'pooler' )
a_ : Dict = getattr(__A , 'dense' )
elif m_name == "embeddings":
trace.append('embeddings' )
a_ : Optional[Any] = getattr(__A , 'embeddings' )
if layer_num == 0:
trace.append('word_embeddings' )
a_ : List[Any] = getattr(__A , 'word_embeddings' )
elif layer_num == 1:
trace.append('position_embeddings' )
a_ : int = getattr(__A , 'position_embeddings' )
elif layer_num == 2:
trace.append('token_type_embeddings' )
a_ : Dict = getattr(__A , 'token_type_embeddings' )
else:
raise ValueError(F"""Unknown embedding layer with name {full_name}""" )
trace.append('weight' )
a_ : Optional[Any] = getattr(__A , 'weight' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['attention', 'self'] )
a_ : str = getattr(__A , 'attention' )
a_ : int = getattr(__A , 'self' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['attention', 'output', 'LayerNorm'] )
a_ : int = getattr(__A , 'attention' )
a_ : Tuple = getattr(__A , 'output' )
a_ : Union[str, Any] = getattr(__A , 'LayerNorm' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['attention', 'output', 'dense'] )
a_ : Optional[int] = getattr(__A , 'attention' )
a_ : str = getattr(__A , 'output' )
a_ : Any = getattr(__A , 'dense' )
elif m_name == "_output_dense":
# output dense
trace.extend(['output', 'dense'] )
a_ : int = getattr(__A , 'output' )
a_ : List[Any] = getattr(__A , 'dense' )
elif m_name == "_output_layer_norm":
                # output LayerNorm
trace.extend(['output', 'LayerNorm'] )
a_ : Any = getattr(__A , 'output' )
a_ : int = getattr(__A , 'LayerNorm' )
elif m_name == "_key_dense":
# attention key
trace.append('key' )
a_ : List[Any] = getattr(__A , 'key' )
elif m_name == "_query_dense":
# attention query
trace.append('query' )
a_ : Dict = getattr(__A , 'query' )
elif m_name == "_value_dense":
# attention value
trace.append('value' )
a_ : Union[str, Any] = getattr(__A , 'value' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['intermediate', 'dense'] )
a_ : Union[str, Any] = getattr(__A , 'intermediate' )
a_ : List[str] = getattr(__A , 'dense' )
elif m_name == "_output_layer_norm":
                # output layer norm — dead branch: the identical "_output_layer_norm" condition above always matches first
trace.append('output' )
a_ : List[Any] = getattr(__A , 'output' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('bias' )
a_ : Tuple = getattr(__A , 'bias' )
elif m_name in ["kernel", "gamma"]:
trace.append('weight' )
a_ : Dict = getattr(__A , 'weight' )
else:
logger.warning(F"""Ignored {m_name}""" )
# for certain layers reshape is necessary
a_ : Optional[Any] = '.'.join(__A )
if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , __A ) or re.match(
R'(\S+)\.attention\.output\.dense\.weight' , __A ):
a_ : Optional[int] = array.reshape(pointer.data.shape )
if "kernel" in full_name:
a_ : Optional[int] = array.transpose()
if pointer.shape == array.shape:
a_ : str = torch.from_numpy(__A )
else:
raise ValueError(
F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
F""" {array.shape}""" )
logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
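# A minimal, hedged sketch of the getattr-walking idea used above: each segment
# derived from a TF variable path becomes one attribute hop on the PyTorch
# module, while trace records the hops for the log message.
import torch.nn as nn
demo_model = nn.Sequential(nn.Linear(4, 4))
demo_pointer, demo_trace = demo_model, []
for segment in ["0", "weight"]:  # e.g. derived from "layer_with_weights-0/kernel"
    demo_pointer = getattr(demo_pointer, segment)
    demo_trace.append(segment)
assert list(demo_pointer.shape) == [4, 4] and demo_trace == ["0", "weight"]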
def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Optional[Any] , __A : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
logger.info(F"""Loading model based on config from {config_path}...""" )
a_ : Optional[Any] = BertConfig.from_json_file(__A )
a_ : List[str] = BertModel(__A )
# Load weights from checkpoint
logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(__A , __A , __A )
# Save pytorch-model
logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , __A )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
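# Hedged invocation example for the converter above; the script name and all
# three paths are placeholders:
#   python convert_checkpoint.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin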
| 356 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a_ : str = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
a_ : Any = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
a_ : Dict = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
a_ : Dict = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
pipeline(task='fill-mask' , model=SCREAMING_SNAKE_CASE__ )
# baseline - just load from_pretrained with normal network
a_ : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
a_ : List[str] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a_ : Any = '1'
a_ : Optional[Any] = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a_ : Tuple = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
a_ : str = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
a_ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
a_ : Tuple = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
pipeline(task='fill-mask' , model=SCREAMING_SNAKE_CASE__ )
# baseline - just load from_pretrained with normal network
a_ : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
a_ : Dict = self.get_env()
a_ : Dict = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a_ : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
a_ : Dict = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
a_ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
a_ : Any = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
a_ : Tuple = self.get_env()
a_ : int = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
a_ : str = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a_ : Dict = '1'
a_ : int = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
a_ : Union[str, Any] = '\nfrom transformers import pipeline\n '
a_ : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
a_ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
a_ : Union[str, Any] = self.get_env()
a_ : Optional[Any] = '1'
a_ : int = [sys.executable, '-c', '\n'.join([load, mock, run] )]
a_ : Union[str, Any] = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : Optional[int] = '\nfrom transformers import AutoModel\n '
a_ : Dict = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
a_ : Tuple = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
a_ : Optional[Any] = self.get_env()
a_ : Dict = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a_ : Optional[int] = '1'
a_ : Optional[Any] = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
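# Self-contained sketch of the network-blocking trick the tests above inject into
# their subprocesses: monkey-patching socket.socket makes every connection
# attempt raise, simulating an offline machine without touching real networking.
import socket
def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket  # any subsequent connect attempt now fails fast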
| 120 | 0 |
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def a_ ( _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Tuple = np.nan
for i in range(_lowerCAmelCase ):
lowercase__ : str = features[:, labels == i]
lowercase__ : Optional[int] = data.mean(1 )
# Centralize the data of class i
lowercase__ : List[Any] = data - column_reshape(_lowerCAmelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowercase__ : List[Any] = np.dot(_lowerCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Dict = features.mean(1 )
lowercase__ : Any = np.nan
for i in range(_lowerCAmelCase ):
lowercase__ : Any = features[:, labels == i]
lowercase__ : Optional[int] = data.shape[1]
lowercase__ : str = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCAmelCase ) - column_reshape(_lowerCAmelCase ) , (column_reshape(_lowerCAmelCase ) - column_reshape(_lowerCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowercase__ : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCAmelCase ) - column_reshape(_lowerCAmelCase ) , (column_reshape(_lowerCAmelCase ) - column_reshape(_lowerCAmelCase )).T , )
return covariance_sum / features.shape[1]
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : int ):
'''simple docstring'''
if features.any():
lowercase__ : str = features.mean(1 )
# Center the dataset
lowercase__ : Any = features - np.reshape(_lowerCAmelCase , (data_mean.size, 1) )
lowercase__ : List[str] = np.dot(_lowerCAmelCase , centered_data.T ) / features.shape[1]
lowercase__ , lowercase__ : Dict = np.linalg.eigh(_lowerCAmelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
lowercase__ : int = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowercase__ : Optional[int] = np.dot(filtered_eigenvectors.T , _lowerCAmelCase )
logging.info('Principal Component Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=_lowerCAmelCase )
logging.error('Dataset empty' )
raise AssertionError
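# Hedged usage sketch for the PCA routine above: rows are features, columns are
# samples, and the projection keeps the top `dimensions` eigenvectors of the
# feature covariance. Numbers are illustrative.
import numpy as np
demo_features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.1, 5.9, 8.0], [0.5, 0.1, 0.4, 0.2]])
demo_centered = demo_features - demo_features.mean(1, keepdims=True)
demo_cov = demo_centered @ demo_centered.T / demo_features.shape[1]
_, demo_vecs = np.linalg.eigh(demo_cov)
demo_projected = demo_vecs[:, ::-1][:, :2].T @ demo_features  # shape (2, n_samples)
assert demo_projected.shape == (2, 4)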
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : int , _lowerCAmelCase : int ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():  # note the call — the bare .any attribute would always be truthy
lowercase__ , lowercase__ : List[str] = eigh(
covariance_between_classes(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , covariance_within_classes(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , )
lowercase__ : Optional[int] = eigenvectors[:, ::-1][:, :dimensions]
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = np.linalg.svd(_lowerCAmelCase )
lowercase__ : List[Any] = svd_matrix[:, 0:dimensions]
lowercase__ : Any = np.dot(filtered_svd_matrix.T , _lowerCAmelCase )
logging.info('Linear Discriminant Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=_lowerCAmelCase )
logging.error('Dataset empty' )
raise AssertionError
def a_ ( ):
'''simple docstring'''
lowercase__ : int = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowercase__ : Optional[int] = np.array([0, 0, 0, 1, 1] )
lowercase__ : Optional[Any] = 2
lowercase__ : Tuple = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_lowerCAmelCase ) as error_info:
lowercase__ : Any = linear_discriminant_analysis(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if isinstance(_lowerCAmelCase , np.ndarray ):
raise AssertionError(
'Did not raise AssertionError for dimensions > classes' )
assert error_info.type is AssertionError
def a_ ( ):
'''simple docstring'''
lowercase__ : List[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowercase__ : Optional[Any] = 2
lowercase__ : Any = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCAmelCase ) as error_info:
lowercase__ : Optional[Any] = principal_component_analysis(_lowerCAmelCase , _lowerCAmelCase )
if not np.allclose(_lowerCAmelCase , _lowerCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 | """simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Dict = ["image_processor", "tokenizer"]
lowerCamelCase__ : Dict = "BlipImageProcessor"
lowerCamelCase__ : Union[str, Any] = "AutoTokenizer"
def __init__( self , a , a , a ) -> Optional[int]:
super().__init__(a , a )
# add QFormer tokenizer
lowercase__ : Dict = qformer_tokenizer
def __call__( self , a = None , a = None , a = True , a = False , a = None , a = None , a = 0 , a = None , a = None , a = False , a = False , a = False , a = False , a = False , a = True , a = None , **a , ) -> BatchFeature:
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
lowercase__ : List[Any] = BatchFeature()
if text is not None:
lowercase__ : Optional[int] = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
encoding.update(a )
lowercase__ : Optional[int] = self.qformer_tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
lowercase__ : List[str] = qformer_text_encoding.pop('input_ids' )
lowercase__ : Any = qformer_text_encoding.pop('attention_mask' )
if images is not None:
lowercase__ : List[Any] = self.image_processor(a , return_tensors=a )
encoding.update(a )
return encoding
def _UpperCAmelCase ( self , *a , **a ) -> List[str]:
return self.tokenizer.batch_decode(*a , **a )
def _UpperCAmelCase ( self , *a , **a ) -> Tuple:
return self.tokenizer.decode(*a , **a )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = self.tokenizer.model_input_names
lowercase__ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _UpperCAmelCase ( self , a , **a ) -> Optional[int]:
if os.path.isfile(a ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(a , exist_ok=a )
lowercase__ : int = os.path.join(a , 'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(a )
return super().save_pretrained(a , **a )
@classmethod
def _UpperCAmelCase ( cls , a , **a ) -> str:
lowercase__ : str = AutoTokenizer.from_pretrained(a , subfolder='qformer_tokenizer' )
lowercase__ : int = cls._get_arguments_from_pretrained(a , **a )
args.append(a )
return cls(*a )
| 77 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_A : Tuple = logging.get_logger(__name__)
_A : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_A : str = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
_A : List[Any] = {
'roberta-base': 5_12,
'roberta-large': 5_12,
'roberta-large-mnli': 5_12,
'distilroberta-base': 5_12,
'roberta-base-openai-detector': 5_12,
'roberta-large-openai-detector': 5_12,
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
_UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Union[str, Any] = ["input_ids", "attention_mask"]
_UpperCAmelCase : str = RobertaTokenizer
def __init__( self : int , A : int=None , A : Any=None , A : int=None , A : List[Any]="replace" , A : Optional[int]="<s>" , A : Optional[int]="</s>" , A : List[Any]="</s>" , A : List[Any]="<s>" , A : Union[str, Any]="<unk>" , A : Optional[int]="<pad>" , A : Union[str, Any]="<mask>" , A : Any=False , A : Optional[Any]=True , **A : List[Any] , ) ->Optional[int]:
super().__init__(
A , A , tokenizer_file=A , errors=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , add_prefix_space=A , trim_offsets=A , **A , )
lowerCamelCase__ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , A ) != add_prefix_space:
lowerCamelCase__ : List[Any] = getattr(A , pre_tok_state.pop('''type''' ) )
lowerCamelCase__ : Dict = add_prefix_space
lowerCamelCase__ : Optional[Any] = pre_tok_class(**A )
lowerCamelCase__ : List[str] = add_prefix_space
lowerCamelCase__ : Optional[Any] = '''post_processor'''
lowerCamelCase__ : Optional[int] = getattr(self.backend_tokenizer , A , A )
if tokenizer_component_instance:
lowerCamelCase__ : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCamelCase__ : Tuple = tuple(state['''sep'''] )
if "cls" in state:
lowerCamelCase__ : List[Any] = tuple(state['''cls'''] )
lowerCamelCase__ : Any = False
if state.get('''add_prefix_space''' , A ) != add_prefix_space:
lowerCamelCase__ : Tuple = add_prefix_space
lowerCamelCase__ : List[Any] = True
if state.get('''trim_offsets''' , A ) != trim_offsets:
lowerCamelCase__ : int = trim_offsets
lowerCamelCase__ : int = True
if changes_to_apply:
lowerCamelCase__ : Optional[int] = getattr(A , state.pop('''type''' ) )
lowerCamelCase__ : Any = component_class(**A )
setattr(self.backend_tokenizer , A , A )
@property
def __lowerCamelCase ( self : List[str] ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self : str , A : Optional[Any] ) ->Optional[int]:
lowerCamelCase__ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else value
lowerCamelCase__ : int = value
def __lowerCamelCase ( self : Optional[int] , *A : Tuple , **A : int ) ->BatchEncoding:
lowerCamelCase__ : Dict = kwargs.get('''is_split_into_words''' , A )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A , **A )
def __lowerCamelCase ( self : Dict , *A : int , **A : List[str] ) ->BatchEncoding:
lowerCamelCase__ : int = kwargs.get('''is_split_into_words''' , A )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A , **A )
def __lowerCamelCase ( self : List[Any] , A : str , A : Optional[str] = None ) ->Tuple[str]:
lowerCamelCase__ : Union[str, Any] = self._tokenizer.model.save(A , name=A )
return tuple(A )
def __lowerCamelCase ( self : Union[str, Any] , A : Tuple , A : Union[str, Any]=None ) ->Optional[Any]:
lowerCamelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self : Any , A : List[int] , A : Optional[List[int]] = None ) ->List[int]:
lowerCamelCase__ : Optional[int] = [self.sep_token_id]
lowerCamelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
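# Hedged illustration of the special-token layouts built above: single sequences
# become <s> A </s> and pairs become <s> A </s></s> B </s>; the strings below
# stand in for token ids.
demo_bos, demo_eos = "<s>", "</s>"
demo_a, demo_b = ["hello"], ["world"]
demo_single = [demo_bos] + demo_a + [demo_eos]
demo_pair = demo_single + [demo_eos] + demo_b + [demo_eos]
assert demo_pair == ["<s>", "hello", "</s>", "</s>", "world", "</s>"]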
| 265 |
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Dict:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
else:
return a * actual_power(UpperCAmelCase , int(b / 2 ) ) * actual_power(UpperCAmelCase , int(b / 2 ) )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(UpperCAmelCase , UpperCAmelCase )
return actual_power(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
print(power(-2, -3))
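# A hedged O(log b) variant of the recursion above: the even branch there calls
# actual_power twice on the same half, so the multiplication count grows
# linearly; computing the half once and squaring it restores the logarithmic bound.
def fast_power(a: float, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half if b % 2 == 0 else a * half * half
assert fast_power(-2, -3) == -0.125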
| 265 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ):
"""simple docstring"""
__UpperCamelCase : List[Any] =parent
__UpperCamelCase : Tuple =batch_size
__UpperCamelCase : str =seq_length
__UpperCamelCase : List[str] =is_training
__UpperCamelCase : Optional[Any] =use_attention_mask
__UpperCamelCase : Optional[int] =use_token_type_ids
__UpperCamelCase : Any =use_labels
__UpperCamelCase : str =vocab_size
__UpperCamelCase : List[Any] =hidden_size
__UpperCamelCase : Any =num_hidden_layers
__UpperCamelCase : List[str] =num_attention_heads
__UpperCamelCase : List[Any] =intermediate_size
__UpperCamelCase : Dict =hidden_act
__UpperCamelCase : Tuple =hidden_dropout_prob
__UpperCamelCase : List[str] =attention_probs_dropout_prob
__UpperCamelCase : Union[str, Any] =max_position_embeddings
__UpperCamelCase : Any =type_vocab_size
__UpperCamelCase : Dict =type_sequence_label_size
__UpperCamelCase : Optional[int] =initializer_range
__UpperCamelCase : Optional[Any] =num_choices
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : List[Any] =None
if self.use_attention_mask:
__UpperCamelCase : Dict =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : Any =None
if self.use_token_type_ids:
__UpperCamelCase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : int =AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[int] =config_and_inputs
__UpperCamelCase : Optional[Any] ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class __A ( a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : str =(
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =FlaxAlbertModelTester(self )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__UpperCamelCase : Optional[Any] =model_class_name.from_pretrained('albert-base-v2' )
__UpperCamelCase : str =model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class __A ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        """simple docstring"""
        model = FlaxAlbertModel.from_pretrained('albert-base-v2' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
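# The integration check above follows a common pattern: compare a small slice of model
# output against reference values recorded from a known-good run. Below is a minimal,
# self-contained sketch of that pattern; the reference numbers here are made up for
# illustration and are not from any real model.
import numpy as np

def assert_close_to_reference(output, reference, atol=1E-4):
    # Fail loudly if the output slice drifts from the recorded reference.
    if not np.allclose(output, reference, atol=atol):
        raise AssertionError(f"max deviation {np.abs(output - reference).max():.2e} exceeds atol={atol}")

_reference = np.array([[0.1, -0.2], [0.3, 0.4]])  # hypothetical recorded slice
assert_close_to_reference(_reference + 1E-5, _reference)  # within tolerance, passes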
| 71 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig( BertConfig ):
    """simple docstring"""
    model_type = """new-model"""
if is_tf_available():
    class TFNewModel( TFBertModel ):
        """simple docstring"""
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name )
        self.assertIsNotNone(config )
        self.assertIsInstance(config , BertConfig )
        model = TFAutoModel.from_pretrained(model_name )
        self.assertIsNotNone(model )
        self.assertIsInstance(model , TFBertModel )
    @slow
    def test_model_for_pretraining_from_pretrained( self ):
        """simple docstring"""
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name )
        self.assertIsNotNone(config )
        self.assertIsInstance(config , BertConfig )
        model = TFAutoModelForPreTraining.from_pretrained(model_name )
        self.assertIsNotNone(model )
        self.assertIsInstance(model , TFBertForPreTraining )
    @slow
    def test_model_for_causal_lm( self ):
        """simple docstring"""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , GPTaConfig )
            model = TFAutoModelForCausalLM.from_pretrained(model_name )
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFGPTaLMHeadModel )
    @slow
    def test_lmhead_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
    @slow
    def test_model_for_masked_lm( self ):
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name )
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
    @slow
    def test_model_for_encoder_decoder_lm( self ):
        """simple docstring"""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TaConfig )
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name )
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTaForConditionalGeneration )
    @slow
    def test_sequence_classification_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForSequenceClassification )
    @slow
    def test_question_answering_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForQuestionAnswering )
    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TapasConfig )
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name )
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTapasForQuestionAnswering )
    def test_from_pretrained_identifier( self ):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
    def test_from_identifier_from_model_type( self ):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
    def test_from_pretrained_with_tuple_values( self ):
        """simple docstring"""
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
        self.assertIsInstance(model , TFFunnelModel )
        config = copy.deepcopy(model.config )
        config.architectures = ['FunnelBaseModel']
        model = TFAutoModel.from_config(config )
        self.assertIsInstance(model , TFFunnelBaseModel )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir )
            model = TFAutoModel.from_pretrained(tmp_dir )
            self.assertIsInstance(model , TFFunnelBaseModel )
    def test_new_model_registration( self ):
        """simple docstring"""
        try:
            AutoConfig.register('new-model' , NewModelConfig )
            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFNewModel )
                    auto_class.register(NewModelConfig , TFNewModel )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFBertModel )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self ).get_config()
                    config = NewModelConfig(**tiny_config.to_dict() )
                    model = auto_class.from_config(config )
                    self.assertIsInstance(model , TFNewModel )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir )
                        new_model = auto_class.from_pretrained(tmp_dir )
                        self.assertIsInstance(new_model , TFNewModel )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found( self ):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier' ):
            model = TFAutoModel.from_pretrained('bert-base' )
    def test_revision_not_found( self ):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
    def test_model_file_not_found( self ):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
            model = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
    def test_model_from_pt_suggestion( self ):
        """simple docstring"""
        with self.assertRaisesRegex(EnvironmentError , 'Use `from_pt=True` to load this model' ):
            model = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
    def test_cached_model_has_minimum_calls_to_head( self ):
        """simple docstring"""
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
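# The registration test above exercises transformers' plug-in flow for custom models.
# Below is a minimal toy registry illustrating the same config -> model mapping idea;
# this is an illustrative sketch, not the library's actual implementation.
class ToyAutoRegistry:
    _mapping = {}

    @classmethod
    def register(cls, config_class, model_class):
        # Mirror the library's guard: the model must declare a matching config class.
        if getattr(model_class, "config_class", config_class) is not config_class:
            raise ValueError("model_class.config_class must match config_class")
        cls._mapping[config_class] = model_class

    @classmethod
    def from_config(cls, config):
        # Dispatch on the concrete config type, as the auto-classes do.
        return cls._mapping[type(config)](config)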
| 71 | 1 |
'''simple docstring'''
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool( PipelineTool ):
    default_checkpoint = '''philschmid/bart-large-cnn-samsum'''
    description = (
        '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
        '''and returns a summary of the text.'''
    )
    name = '''summarizer'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ['''text''']
    outputs = ['''text''']
    def encode( self , text ):
        return self.pre_processor(text , return_tensors="""pt""" , truncation=True )
    def forward( self , inputs ):
        return self.model.generate(**inputs )[0]
    def decode( self , outputs ):
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True )
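# A hedged usage sketch for the tool above (kept commented out because the first call
# downloads the checkpoint; the call signature assumes the PipelineTool convention of
# passing the declared inputs positionally):
# tool = TextSummarizationTool()
# print(tool("Alice: Lunch at noon? Bob: Works for me, see you then."))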
| 370 |
'''simple docstring'''
def longest_common_substring( text1 , text2 ):
    """simple docstring"""
    if not (isinstance(text1 , str ) and isinstance(text2 , str )):
        raise ValueError("""longest_common_substring() takes two strings for inputs""" )
    text1_length = len(text1 )
    text2_length = len(text2 )
    # dp[i][j] is the length of the common substring ending at text1[i-1] / text2[j-1]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1 )]
    ans_index = 0
    ans_length = 0
    for i in range(1 , text1_length + 1 ):
        for j in range(1 , text2_length + 1 ):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
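# Quick sanity checks of the DP routine above (these asserts are illustrative
# additions, not part of the original module):
assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"
assert longest_common_substring("zxabcdezy", "yzabcdezx") == "abcdez"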
| 220 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" ,do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" ,strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1=None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory ,filename_prefix=None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
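# A sketch of what the two helper methods above produce for a sequence pair; the token
# ids 5, 6, 7 are arbitrary stand-ins for real wordpiece ids, and the calls are kept
# commented out because they require downloading the tokenizer files:
# tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
# pair = tok.build_inputs_with_special_tokens([5, 6], [7])          # [CLS] 5 6 [SEP] 7 [SEP]
# segments = tok.create_token_type_ids_from_sequences([5, 6], [7])
# assert segments == [0, 0, 0, 0, 1, 1]  # 0s for the first segment, 1s for the second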
| 20 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool( PipelineTool ):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup( self ):
        '''simple docstring'''
        if self.post_processor is None:
            self.post_processor = """microsoft/speecht5_hifigan"""
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        '''simple docstring'''
        inputs = self.pre_processor(text=text , return_tensors="""pt""" , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        '''simple docstring'''
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        '''simple docstring'''
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
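# A hedged usage sketch (kept commented out: it needs torch, datasets, and network
# access; the default speaker embedding is the CMU Arctic x-vector selected above):
# reader = TextToSpeechTool()
# waveform = reader("The quick brown fox jumps over the lazy dog.")
# waveform is a 1-D float tensor; SpeechT5 synthesizes audio at a 16 kHz sample rate.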
| 20 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_layoutlmv3_fast"""] = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_layoutlmv3"""] = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_layoutlmv3"""] = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_layoutlmv3"""] = ["""LayoutLMv3FeatureExtractor"""]
    _import_structure["""image_processing_layoutlmv3"""] = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
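# The file above follows transformers' lazy-import convention: submodules are only
# imported when one of their attributes is first accessed. A minimal standalone sketch
# of the same idea (illustrative only, not the library's _LazyModule):
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [attrs]} into {attr: submodule} for lookup.
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value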
| 355 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
        return model
@property
    def dummy_vae( self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
        return model
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        return CLIPTextModel(config )
@property
    def dummy_extractor( self ):
        def extract(*args ,**kwargs ):
            class Out:
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )
                def to( self ,device ):
                    self.pixel_values.to(device )
                    return self
            return Out()
        return extract
    def test_safe_diffusion_ddim( self ):
_UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Optional[int] = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : int = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Optional[int] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : int = output.images
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_pndm( self ):
_UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Tuple = self.dummy_cond_unet
_UpperCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : int = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : str = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : int = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Any = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_no_safety_checker( self ):
_UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=a_ )
assert isinstance(a_ ,a_ )
assert isinstance(pipe.scheduler ,a_ )
assert pipe.safety_checker is None
_UpperCAmelCase : Dict = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase : Union[str, Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
    def test_stable_diffusion_fp16( self ):
_UpperCAmelCase : Optional[int] = self.dummy_cond_unet
_UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : List[str] = vae.half()
_UpperCAmelCase : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : str = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : int = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion( self ):
_UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_UpperCAmelCase : Any = 4_003_660_346
_UpperCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
_UpperCAmelCase : List[str] = torch.manual_seed(a_ )
_UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safe_stable_diffusion( self ):
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
_UpperCAmelCase : Optional[Any] = 2_734_971_755
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_UpperCAmelCase : Optional[int] = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safetychecker_safe_stable_diffusion( self ):
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_UpperCAmelCase : Dict = 1_044_355_234
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[Any] = torch.manual_seed(a_ )
_UpperCAmelCase : List[str] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_UpperCAmelCase : Tuple = torch.manual_seed(a_ )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 349 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env( key , default=False ):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCamelCase_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCamelCase_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCamelCase_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCamelCase_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCamelCase_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCamelCase_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCamelCase_ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def require_faiss( test_case ):
    '''simple docstring'''
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires faiss""" )(test_case )
    return test_case
def require_regex( test_case ):
    '''simple docstring'''
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires regex""" )(test_case )
    return test_case
def require_elasticsearch( test_case ):
    '''simple docstring'''
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires elasticsearch""" )(test_case )
    return test_case
def require_sqlalchemy( test_case ):
    '''simple docstring'''
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires sqlalchemy""" )(test_case )
    return test_case
def require_torch( test_case ):
    '''simple docstring'''
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("""test requires PyTorch""" )(test_case )
    return test_case
def require_tf( test_case ):
    '''simple docstring'''
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("""test requires TensorFlow""" )(test_case )
    return test_case
def require_jax( test_case ):
    '''simple docstring'''
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("""test requires JAX""" )(test_case )
    return test_case
def require_pil( test_case ):
    '''simple docstring'''
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("""test requires Pillow""" )(test_case )
    return test_case
def require_transformers( test_case ):
    '''simple docstring'''
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("""test requires transformers""" )(test_case )
    else:
        return test_case
def require_tiktoken( test_case ):
    '''simple docstring'''
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("""test requires tiktoken""" )(test_case )
    else:
        return test_case
def require_spacy( test_case ):
    '''simple docstring'''
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("""test requires spacy""" )(test_case )
    else:
        return test_case
def require_spacy_model( model ):
    '''simple docstring'''
    def _require_spacy_model( test_case ):
        try:
            import spacy  # noqa F401
            spacy.load(model )
        except ImportError:
            return unittest.skip("""test requires spacy""" )(test_case )
        except OSError:
            return unittest.skip("""test requires spacy model '{}'""".format(model ) )(test_case )
        else:
            return test_case
    return _require_spacy_model
def require_pyspark( test_case ):
    '''simple docstring'''
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires pyspark""" )(test_case )
    else:
        return test_case
def require_joblibspark( test_case ):
    '''simple docstring'''
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires joblibspark""" )(test_case )
    else:
        return test_case
def slow( test_case ):
    '''simple docstring'''
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("""test is slow""" )(test_case )
    return test_case
def local( test_case ):
    '''simple docstring'''
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("""test is local""" )(test_case )
    return test_case
def packaged( test_case ):
    '''simple docstring'''
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("""test is packaged""" )(test_case )
    return test_case
def remote( test_case ):
    '''simple docstring'''
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("""test requires remote""" )(test_case )
    return test_case
def for_all_test_methods( *decorators ):
    '''simple docstring'''
    def decorate( cls ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith("""test""" ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls
    return decorate
class RequestWouldHangIndefinitelyError( Exception ):
    '''simple docstring'''
    pass
class OfflineSimulationMode( Enum ):
    '''simple docstring'''
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline( mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1E-16 ):
    '''simple docstring'''
    online_request = requests.Session().request
    def timeout_request( session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = """https://10.255.255.1"""
        if kwargs.get("""timeout""" ) is None:
            raise RequestWouldHangIndefinitelyError(
                F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        kwargs["""timeout"""] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("""10.255.255.1""" , F'''OfflineMock[{url}]''' ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error( session , prepared_request , **kwargs ):
        raise requests.ConnectionError("""Offline mode is enabled.""" , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("""requests.Session.send""" , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("""requests.Session.request""" , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("""datasets.config.HF_DATASETS_OFFLINE""" , True ):
            yield
    else:
        raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def set_current_working_directory_to_temp_dir( *args , **kwargs ):
    '''simple docstring'''
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def assert_arrow_memory_increases():
    '''simple docstring'''
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    '''simple docstring'''
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal( rng1 , rng2 ):
    '''simple docstring'''
    return deepcopy(rng1 ).integers(0 , 100 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 100 , 10 ).tolist()
def xfail_if_500_502_http_error( func ):
    '''simple docstring'''
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper( func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith("""500""" ) or str(err ).startswith("""502""" ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , func )
class _RunOutput:
    '''simple docstring'''
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    '''simple docstring'''
    if echo:
        print("""\nRunning: """ , """ """.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee( line , sink , pipe , label="" ):
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label="""stdout:""" ) ),
            _read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label="""stderr:""" ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = """ """.join(cmd )
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr )
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' )
    return result
def pytest_xdist_worker_id():
    '''simple docstring'''
    worker = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
    worker = re.sub(r"""^gw""" , """""" , worker , 0 , re.M )
    return int(worker )
def get_torch_dist_unique_port():
    '''simple docstring'''
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 345 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/swinv2-tiny-patch4-window8-256''': (
        '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
    ),
}
class Swinv2Config( PretrainedConfig ):
    '''simple docstring'''
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
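# A quick numerical check of the hidden_size computed in __init__ above for the
# default config: embed_dim * 2 ** (len(depths) - 1) = 96 * 2 ** 3 (an illustrative
# assert, not part of the original module):
_config = Swinv2Config()
assert _config.hidden_size == 768  # 96 * 2 ** 3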
| 345 | 1 |
from maths.prime_factors import prime_factors
def liouville_lambda( number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        raise TypeError(F'''Input value of [number={number}] must be an integer''' )
    if number < 1:
        raise ValueError('Input must be a positive integer' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
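# Quick checks (illustrative additions; they assume prime_factors returns factors
# with multiplicity, so 12 = 2 * 2 * 3 contributes three factors):
assert liouville_lambda(12) == -1  # odd number of prime factors
assert liouville_lambda(10) == 1   # 2 * 5, even number of prime factors
assert liouville_lambda(1) == 1    # empty factorization counts as even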
| 185 |
from importlib import import_module
from .logging import get_logger
lowerCAmelCase__ :Optional[Any] = get_logger(__name__)
class _PatchedModuleObj:
    """simple docstring"""
    def __init__( self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__' ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule:
    """simple docstring"""
    _active_patches = []
    def __init__( self , obj , target , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.' )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ):
        *submodules , target_attr = self.target.split('.' )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module('.'.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
    def __exit__( self , *exc_info ):
        """simple docstring"""
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start( self ):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self )
    def stop( self ):
        """Deactivate a patch."""
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
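# A hedged usage sketch of the patcher above: patch "os.path.join" as seen from an
# object that holds "os" as an attribute. SimpleNamespace stands in for a module that
# did "import os"; all names below are illustrative.
import os as _os
import types as _types

_fake_mod = _types.SimpleNamespace(os=_os)

def _fake_join(*paths):
    return "/".join(paths)

with patch_submodule(_fake_mod, "os.path.join", _fake_join):
    assert _fake_mod.os.path.join("a", "b") == "a/b"
assert _fake_mod.os.path.join("a", "b") == _os.path.join("a", "b")  # restored on exit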
| 185 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
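# A short usage sketch (not part of the original module): reading the ONNX
# export spec defined above. BeitConfig/BeitOnnxConfig follow the upstream
# `transformers` API; treat the exact import path as an assumption.
#
#   config = BeitConfig(image_size=224, patch_size=16)
#   onnx_config = BeitOnnxConfig(config)
#   print(onnx_config.inputs)               # OrderedDict with the "pixel_values" axes
#   print(onnx_config.atol_for_validation)  # 1e-4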
| 32 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
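# The non-deprecated import path, taken from the deprecation message above:
#
#   from diffusers import StableDiffusionControlNetPipeline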
| 323 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve an ODE y' = f(x, y) with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # predictor: one plain Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at both interval endpoints
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predict)
        )
        x += step_size
    return y
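# Worked example (illustrative, not from the source): integrate y' = y on
# [0, 1] from y(0) = 1 with step 0.1; the last entry approximates e ≈ 2.71828.
#
#   y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
#   print(y[-1])  # roughly 2.714 with this step size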
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1.__class__, cb2.__class__)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Tuple = self.get_trainer()
snake_case__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case__ : int = self.get_trainer(disable_tqdm=snake_case_ )
snake_case__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Optional[int] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : Any = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(snake_case_ )
expected_callbacks.remove(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[Any] = trainer.pop_callback(snake_case_ )
self.assertEqual(cb.__class__ , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
trainer.add_callback(snake_case_ )
expected_callbacks.insert(0 , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# We can also add, pop, or remove by instance
snake_case__ : Optional[Any] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
trainer.remove_callback(snake_case_ )
expected_callbacks.remove(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
snake_case__ : Any = self.get_trainer()
snake_case__ : Dict = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[int] = trainer.pop_callback(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
trainer.add_callback(snake_case_ )
expected_callbacks.insert(0 , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
def lowerCamelCase ( self : str ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=snake_case_ )
snake_case__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# Independent log/save/eval
snake_case__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : Optional[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# A bit of everything
snake_case__ : Optional[int] = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : Optional[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(snake_case_ ) in warn_mock.call_args[0][0]
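    # A hedged usage sketch (outside the test harness): the same recording
    # callback works with any real Trainer; the names below mirror this file's
    # own fixtures rather than a guaranteed public API.
    #
    #   trainer = Trainer(model, training_args, callbacks=[MyTestTrainerCallback()])
    #   trainer.train()
    #   print(trainer.callback_handler.callbacks[-2].events)  # ordered event log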
| 43 | 0 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n as an ascending list of primes."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
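# Example: 360 = 2^3 * 3^2 * 5
#
#   print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]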
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
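# Direct-call sketch (file path and model choice are illustrative): summarize
# a few strings without going through the CLI entry point below.
#
#   stats = generate_summaries_or_translations(
#       ["PG&E scheduled the blackouts in response to forecasts for high winds ..."],
#       "preds.txt",
#       "sshleifer/distilbart-cnn-12-6",
#       batch_size=1,
#   )
#   print(stats["seconds_per_sample"])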
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
if args.reference_path is None:
return {}
# Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 226 | 0 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Sort a[start:end + 1] in place and return the number of comparisons."""
    count = 0
    if start < end:
        # move a randomly chosen pivot to the end of the range
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Partition a[start:end + 1] around a random pivot; return (pivot_index, comparisons)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
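# Quick sanity check (illustrative, complements the random-data driver below):
# sort a small list in place and count the comparisons made along the way.
#
#   data = [3, 1, 2]
#   comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#   assert data == [1, 2, 3]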
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    """No of Comparisons for 100 elements selected from a standard normal distribution"""
    """ is :"""
)
print(z)
| 363 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")
    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
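# The query/key/value handling in the conversion above splits a fused
# projection matrix: rows [0, d) are Q, [d, 2d) are K, [2d, 3d) are V. A
# standalone sketch of the same slicing (toy sizes, not tied to a checkpoint):
#
#   import torch
#   d = 8
#   in_proj_weight = torch.randn(3 * d, d)  # fused QKV weight
#   q, k, v = in_proj_weight[:d], in_proj_weight[d : 2 * d], in_proj_weight[2 * d :]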
| 295 | 0 |
'''simple docstring'''
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
"""simple docstring"""
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
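# Example: the buoyant force on 0.5 m^3 fully submerged in water
# (density 1000 kg/m^3) is 1000 * 9.80665 * 0.5 ≈ 4903.3 N:
#
#   print(archimedes_principle(1000, 0.5))  # 4903.325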
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 2 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __a :
def __init__( self : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int]=13 , __magic_name__ : str=7 , __magic_name__ : Dict=True , __magic_name__ : Dict=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Tuple=99 , __magic_name__ : List[str]=32 , __magic_name__ : int=2 , __magic_name__ : List[str]=4 , __magic_name__ : Tuple=37 , __magic_name__ : Dict="gelu" , __magic_name__ : int=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Optional[int]=5_12 , __magic_name__ : Tuple=16 , __magic_name__ : Optional[int]=2 , __magic_name__ : Optional[int]=0.0_2 , __magic_name__ : Dict=3 , __magic_name__ : str=4 , __magic_name__ : Optional[Any]=None , __magic_name__ : Any=0 , ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : Optional[Any] = use_input_mask
UpperCAmelCase_ : Tuple = use_token_type_ids
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : List[str] = type_sequence_label_size
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : str = num_labels
UpperCAmelCase_ : Tuple = num_choices
UpperCAmelCase_ : Union[str, Any] = scope
UpperCAmelCase_ : Union[str, Any] = projection_dim
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Dict = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
UpperCAmelCase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Optional[Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
UpperCAmelCase_ : List[str] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : str , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Any ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = TFDPRContextEncoder(config=__magic_name__ )
UpperCAmelCase_ : Tuple = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : int = model(__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : Any = model(__magic_name__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase_ : List[str] = TFDPRQuestionEncoder(config=__magic_name__ )
UpperCAmelCase_ : Optional[int] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : Optional[int] = model(__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : List[Any] = model(__magic_name__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = TFDPRReader(config=__magic_name__ )
UpperCAmelCase_ : Tuple = model(__magic_name__ , attention_mask=__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Any = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__a : int = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
__a : str = False
__a : str = False
__a : Dict = False
__a : Optional[Any] = False
__a : Any = False
def UpperCAmelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = TFDPRModelTester(self )
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__magic_name__ )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__magic_name__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Union[str, Any] = TFDPRContextEncoder.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = TFDPRContextEncoder.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = TFDPRQuestionEncoder.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = TFDPRReader.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_tf
class __a (unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
UpperCAmelCase_ : Optional[int] = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
UpperCAmelCase_ : List[Any] = model(__magic_name__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
UpperCAmelCase_ : List[str] = tf.constant(
[
[
0.0_3_2_3_6_2_5_3,
0.1_2_7_5_3_3_3_5,
0.1_6_8_1_8_5_0_9,
0.0_0_2_7_9_7_8_6,
0.3_8_9_6_9_3_3,
0.2_4_2_6_4_9_4_5,
0.2_1_7_8_9_7_1,
-0.0_2_3_3_5_2_2_7,
-0.0_8_4_8_1_9_5_9,
-0.1_4_3_2_4_1_1_7,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 125 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class a__( unittest.TestCase ):
@slow
def lowercase_ ( self : Any ):
a : str = 'bert-base-cased'
a : Any = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = TFAutoModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : List[Any] ):
a : int = 'bert-base-cased'
a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = TFAutoModelForPreTraining.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : List[Any] ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = TFAutoModelForCausalLM.from_pretrained(__snake_case )
a , a : Tuple = TFAutoModelForCausalLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[Any] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Any = TFAutoModelWithLMHead.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : List[str] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Optional[int] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = TFAutoModelForMaskedLM.from_pretrained(__snake_case )
a , a : Any = TFAutoModelForMaskedLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[Any] ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Optional[int] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case )
a , a : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[int] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Dict = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = TFAutoModelForSequenceClassification.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Dict ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
@require_tensorflow_probability
def lowercase_ ( self : Tuple ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
a : Optional[int] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = TFAutoModelForTableQuestionAnswering.from_pretrained(__snake_case )
a , a : Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained(
__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowercase_ ( self : int ):
a : str = TFAutoModelWithLMHead.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
def lowercase_ ( self : Any ):
a : Any = TFAutoModelWithLMHead.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
def lowercase_ ( self : Dict ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
a : List[Any] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(__snake_case , __snake_case )
a : int = copy.deepcopy(model.config )
a : Optional[int] = ['FunnelBaseModel']
a : Union[str, Any] = TFAutoModel.from_config(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case )
a : str = TFAutoModel.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowercase_ ( self : Optional[Any] ):
try:
AutoConfig.register('new-model' , __snake_case )
a : Union[str, Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(__snake_case ):
auto_class.register(__snake_case , __snake_case )
auto_class.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
auto_class.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
a : Union[str, Any] = BertModelTester(self ).get_config()
a : Any = NewModelConfig(**tiny_config.to_dict() )
a : Optional[Any] = auto_class.from_config(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case )
a : Tuple = auto_class.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
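    # Minimal registration sketch using this file's own fixtures: once the
    # config and model classes are registered, the Auto API resolves the
    # custom "new-model" type like any built-in one.
    #
    #   AutoConfig.register("new-model", NewModelConfig)
    #   TFAutoModel.register(NewModelConfig, TFNewModel)
    #   model = TFAutoModel.from_config(NewModelConfig())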
def lowercase_ ( self : Any ):
with self.assertRaisesRegex(
__snake_case , 'bert-base is not a local folder and is not a valid model identifier' ):
a : Dict = TFAutoModel.from_pretrained('bert-base' )
def lowercase_ ( self : Dict ):
with self.assertRaisesRegex(
__snake_case , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
a : str = TFAutoModel.from_pretrained(__snake_case , revision='aaaaaa' )
def lowercase_ ( self : Any ):
with self.assertRaisesRegex(
__snake_case , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
a : Any = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def lowercase_ ( self : List[Any] ):
with self.assertRaisesRegex(__snake_case , 'Use `from_pt=True` to load this model' ):
a : int = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def lowercase_ ( self : List[str] ):
# Make sure we have cached the model.
a : Dict = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
a : Optional[Any] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
a : int = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
a : Optional[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
| 96 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
lowercase__ = True
def lowercase_ ( self : int ):
super().setUp()
a : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self : Any , __snake_case : str ):
a : Union[str, Any] = 'こんにちは、世界。 \nこんばんは、世界。'
a : List[Any] = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ):
a , a : List[str] = self.get_input_output_texts(__snake_case )
a : Optional[int] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
a : str = tokenizer.decode(__snake_case , clean_up_tokenization_spaces=__snake_case )
return text, ids
def lowercase_ ( self : Optional[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : Dict ):
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
a : Optional[int] = self.tokenizer_class(self.vocab_file )
a : Optional[int] = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowercase_ ( self : Union[str, Any] ):
a : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(__snake_case )
a : List[str] = 'こんにちは、世界。\nこんばんは、世界。'
a : Tuple = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Optional[int] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : Optional[Any] = pickle.load(__snake_case )
a : Tuple = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowercase_ ( self : Dict ):
a : List[str] = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : List[Any] ):
try:
a : int = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : Any ):
try:
a : Union[str, Any] = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : str ):
a : Tuple = MecabTokenizer(do_lower_case=__snake_case , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : Union[str, Any] ):
try:
a : Any = MecabTokenizer(
do_lower_case=__snake_case , normalize_text=__snake_case , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase_ ( self : List[Any] ):
a : Dict = MecabTokenizer(normalize_text=__snake_case , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase_ ( self : str ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(__snake_case )
a : List[Any] = 'こんにちは、世界。\nこんばんは、世界。'
a : int = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Tuple = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : Optional[int] = pickle.load(__snake_case )
a : List[Any] = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@require_sudachi
def lowercase_ ( self : List[Any] ):
a : Optional[Any] = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Any ):
a : str = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase_ ( self : Optional[Any] ):
a : Optional[int] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase_ ( self : Optional[Any] ):
a : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase_ ( self : Dict ):
a : Optional[int] = SudachiTokenizer(do_lower_case=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Tuple ):
a : int = SudachiTokenizer(normalize_text=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Union[str, Any] ):
a : List[str] = SudachiTokenizer(trim_whitespace=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase_ ( self : List[Any] ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(__snake_case )
a : str = 'こんにちは、世界。\nこんばんは、世界。'
a : Tuple = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : List[str] = pickle.load(__snake_case )
a : Any = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@require_jumanpp
def lowercase_ ( self : List[str] ):
a : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : List[str] ):
a : List[Any] = JumanppTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : Any ):
a : List[Any] = JumanppTokenizer(normalize_text=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : Any ):
a : str = JumanppTokenizer(trim_whitespace=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase_ ( self : Tuple ):
a : int = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase_ ( self : Any ):
a : int = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
a : Optional[int] = {}
for i, token in enumerate(__snake_case ):
a : Dict = i
a : Optional[Any] = WordpieceTokenizer(vocab=__snake_case , unk_token='[UNK]' )
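# WordPiece greedily matches the longest vocabulary piece at each position;
# a word with no full segmentation collapses to the single unk_token.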
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase_ ( self : Tuple ):
a : List[Any] = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
a : List[Any] = tokenizer.subword_tokenizer
a : List[str] = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(__snake_case , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
a : Union[str, Any] = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(__snake_case , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase_ ( self : Union[str, Any] ):
a : Optional[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
a : Dict = tokenizer.encode('ありがとう。' , add_special_tokens=__snake_case )
a : str = tokenizer.encode('どういたしまして。' , add_special_tokens=__snake_case )
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case )
a : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
def lowercase_ ( self : List[Any] ):
super().setUp()
a : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self : Optional[Any] , **__snake_case : List[Any] ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **__snake_case )
def lowercase_ ( self : Tuple , __snake_case : List[str] ):
a : int = 'こんにちは、世界。 \nこんばんは、世界。'
a : Optional[Any] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase_ ( self : str ):
pass # TODO add if relevant
def lowercase_ ( self : List[str] ):
pass # TODO add if relevant
def lowercase_ ( self : Any ):
pass # TODO add if relevant
def lowercase_ ( self : Any ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
a : Tuple = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
__snake_case , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowercase_ ( self : Any ):
a : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : Optional[Any] = {}
for i, token in enumerate(__snake_case ):
a : Tuple = i
a : Optional[int] = CharacterTokenizer(vocab=__snake_case , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase_ ( self : Tuple ):
a : List[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
a : Optional[int] = tokenizer.encode('ありがとう。' , add_special_tokens=__snake_case )
a : List[str] = tokenizer.encode('どういたしまして。' , add_special_tokens=__snake_case )
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case )
a : Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a__( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
a : List[Any] = 'cl-tohoku/bert-base-japanese'
a : Dict = AutoTokenizer.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
class a__( unittest.TestCase ):
def lowercase_ ( self : Union[str, Any] ):
a : List[str] = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(__snake_case )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
a : Dict = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(__snake_case )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 96 | 1 |
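The row above exercises the Japanese word tokenizers behind BertJapaneseTokenizer. A minimal de-obfuscated sketch of the Sudachi split-mode behaviour it asserts, assuming sudachipy and the "core" dictionary package are installed; the expected outputs are taken verbatim from the assertions above:

from transformers.models.bert_japanese.tokenization_bert_japanese import SudachiTokenizer

# Split mode controls segmentation granularity: "A" yields the shortest units,
# "C" the longest compound units, "B" an intermediate level.
for mode, expected in [
    ("A", ["外国", "人", "参政", "権"]),
    ("B", ["外国人", "参政権"]),
    ("C", ["外国人参政権"]),
]:
    tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode=mode)
    assert tokenizer.tokenize("外国人参政権") == expected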
"""simple docstring"""
A: Tuple = 0 # The first color of the flag.
A: Any = 1 # The second color of the flag.
A: List[Any] = 2 # The third color of the flag.
A: Optional[int] = (red, white, blue)
def _snake_case ( UpperCamelCase : list ):
if not sequence:
return []
if len(UpperCamelCase ) == 1:
return list(UpperCamelCase )
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : List[str] = len(UpperCamelCase ) - 1
UpperCAmelCase : List[Any] = 0
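# Loop invariant: sequence[:low] holds colors[0], sequence[low:mid] holds colors[1],
# sequence[high + 1:] holds colors[2]; sequence[mid:high + 1] is still unexamined.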
while mid <= high:
if sequence[mid] == colors[0]:
UpperCAmelCase , UpperCAmelCase : str = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
UpperCAmelCase , UpperCAmelCase : str = sequence[high], sequence[mid]
high -= 1
else:
UpperCAmelCase : Union[str, Any] = F"The elements inside the sequence must contains only {colors} values"
raise ValueError(UpperCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Tuple = input("Enter numbers separated by commas:\n").strip()
A: List[str] = [int(item.strip()) for item in user_input.split(",")]
print(f"""{dutch_national_flag_sort(unsorted)}""")
| 109 |
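A de-obfuscated sketch of the three-way partition above (the Dutch national flag problem: a one-pass, in-place sort of a sequence containing only the three flag colours):

RED, WHITE, BLUE = 0, 1, 2
COLORS = (RED, WHITE, BLUE)

def dutch_national_flag_sort(sequence: list) -> list:
    low, mid, high = 0, 0, len(sequence) - 1
    while mid <= high:
        if sequence[mid] == COLORS[0]:      # swap 0s to the front
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == COLORS[1]:    # 1s are already in place
            mid += 1
        elif sequence[mid] == COLORS[2]:    # swap 2s to the back
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            raise ValueError(f"The elements inside the sequence must contain only {COLORS} values")
    return sequence

assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]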
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
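# ModelHook subclasses below rewrite a module's inputs (pre_forward) or outputs
# (post_forward); add_hook_to_module patches the module's forward method in place.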
class lowerCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : Optional[Any] = nn.Linear(3 , 4 )
__UpperCAmelCase : Optional[int] = nn.BatchNorm1d(4 )
__UpperCAmelCase : int = nn.Linear(4 , 5 )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(UpperCamelCase ) ) )
class lowerCamelCase__ ( A ):
"""simple docstring"""
def lowerCamelCase__ ( self : str , UpperCamelCase : Any , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class lowerCamelCase__ ( A ):
"""simple docstring"""
def lowerCamelCase__ ( self : Any , UpperCamelCase : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
return output + 1
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Dict = ModelForTest()
__UpperCAmelCase : List[str] = ModelHook()
add_hook_to_module(UpperCamelCase , UpperCamelCase )
self.assertEqual(test_model._hf_hook , UpperCamelCase )
self.assertTrue(hasattr(UpperCamelCase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCamelCase )
self.assertFalse(hasattr(UpperCamelCase , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCamelCase , """_old_forward""" ) )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : int = ModelForTest()
__UpperCAmelCase : Optional[Any] = ModelHook()
add_hook_to_module(UpperCamelCase , UpperCamelCase )
add_hook_to_module(UpperCamelCase , UpperCamelCase , append=UpperCamelCase )
self.assertEqual(isinstance(test_model._hf_hook , UpperCamelCase ) , UpperCamelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(UpperCamelCase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCamelCase )
self.assertFalse(hasattr(UpperCamelCase , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCamelCase , """_old_forward""" ) )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ModelForTest()
__UpperCAmelCase : str = torch.randn(2 , 3 )
__UpperCAmelCase : List[str] = test_model(x + 1 )
__UpperCAmelCase : Optional[int] = test_model(x + 2 )
__UpperCAmelCase : Optional[Any] = PreForwardHook()
add_hook_to_module(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Optional[Any] = test_model(UpperCamelCase )
self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
__UpperCAmelCase : Optional[Any] = PreForwardHook()
add_hook_to_module(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Optional[int] = test_model(UpperCamelCase )
self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__UpperCAmelCase : Optional[Any] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : List[str] = test_model(UpperCamelCase )
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ModelForTest()
__UpperCAmelCase : str = torch.randn(2 , 3 )
__UpperCAmelCase : int = test_model(UpperCamelCase )
__UpperCAmelCase : Dict = PostForwardHook()
add_hook_to_module(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Any = test_model(UpperCamelCase )
self.assertTrue(torch.allclose(UpperCamelCase , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
__UpperCAmelCase : List[str] = PostForwardHook()
add_hook_to_module(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Optional[Any] = test_model(UpperCamelCase )
self.assertTrue(torch.allclose(UpperCamelCase , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__UpperCAmelCase : Optional[int] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : int = test_model(UpperCamelCase )
assert torch.allclose(UpperCamelCase , output + 2 , atol=1e-5 )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : int = ModelForTest()
__UpperCAmelCase : str = torch.randn(2 , 3 )
__UpperCAmelCase : Optional[Any] = test_model(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = PostForwardHook()
add_hook_to_module(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Optional[int] = test_model(UpperCamelCase )
self.assertTrue(torch.allclose(UpperCamelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Optional[int] = test_model(UpperCamelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule onto a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__UpperCAmelCase : List[str] = torch.randn(2 , 3 )
__UpperCAmelCase : Any = model(UpperCamelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(UpperCamelCase , AlignDevicesHook(io_same_device=UpperCamelCase ) )
__UpperCAmelCase : int = torch.randn(2 , 3 ).to(0 )
__UpperCAmelCase : Any = model(UpperCamelCase )
self.assertEqual(output.device , torch.device(0 ) )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule onto a different device
__UpperCAmelCase : Union[str, Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCamelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__UpperCAmelCase : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = torch.randn(2 , 3 )
__UpperCAmelCase : Optional[int] = model(UpperCamelCase )
self.assertEqual(output.device , UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__UpperCAmelCase : Union[str, Any] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCamelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__UpperCAmelCase : str = torch.randn(2 , 3 )
__UpperCAmelCase : Dict = model(UpperCamelCase )
self.assertEqual(output.device , UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule onto a different device
__UpperCAmelCase : Optional[int] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(UpperCamelCase , execution_device=UpperCamelCase , offload=UpperCamelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__UpperCAmelCase : Any = torch.device(UpperCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , UpperCamelCase )
__UpperCAmelCase : str = torch.randn(2 , 3 )
__UpperCAmelCase : Any = model(UpperCamelCase )
self.assertEqual(output.device , UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(UpperCamelCase , execution_device=UpperCamelCase , offload=UpperCamelCase , offload_buffers=UpperCamelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__UpperCAmelCase : List[str] = torch.randn(2 , 3 )
__UpperCAmelCase : Any = model(UpperCamelCase )
self.assertEqual(output.device , UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule onto a different device
__UpperCAmelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
UpperCamelCase , execution_device=UpperCamelCase , offload=UpperCamelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__UpperCAmelCase : str = torch.device(UpperCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , UpperCamelCase )
__UpperCAmelCase : Dict = torch.randn(2 , 3 )
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase )
self.assertEqual(output.device , UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
UpperCamelCase , execution_device=UpperCamelCase , offload=UpperCamelCase , weights_map=model.state_dict() , offload_buffers=UpperCamelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__UpperCAmelCase : int = torch.randn(2 , 3 )
__UpperCAmelCase : List[str] = model(UpperCamelCase )
self.assertEqual(output.device , UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 115 | 0 |
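A minimal runnable sketch of the pre-forward hook pattern the row above tests, assuming accelerate is installed (AddOnePreHook is a hypothetical name for the hook the tests define):

import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class AddOnePreHook(ModelHook):
    # Runs before forward and shifts the first positional argument by +1.
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs

model = nn.Linear(3, 4)
add_hook_to_module(model, AddOnePreHook())
x = torch.randn(2, 3)
y_hooked = model(x)                 # the hook rewrites the input to x + 1
remove_hook_from_module(model)      # restores the original forward
assert torch.allclose(y_hooked, model(x + 1), atol=1e-5)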
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A :
'''simple docstring'''
def __init__(self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Any=13 , _UpperCAmelCase : int=64 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : str=3 , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Union[str, Any]=37 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Any=10 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Any=[1, 16, 4, 4] , _UpperCAmelCase : str=None , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowercase__ = (self.image_size // 32) ** 2
lowercase__ = num_patches + 1
def lowerCamelCase__ (self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCAmelCase , )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTHybridModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = ViTHybridForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ (self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
def lowerCamelCase__ (self : Any ) -> Any:
"""simple docstring"""
lowercase__ = ViTHybridModelTester(self )
lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowerCamelCase__ (self : Dict ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCamelCase__ (self : List[str] ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowerCamelCase__ (self : Dict ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase__ = model_class(config=_UpperCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowercase__ = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def lowerCamelCase__ (self : Dict ) -> List[Any]:
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTHybridModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
lowercase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase__ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCAmelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )
# verify the logits
lowercase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
lowercase__ = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
def lowerCamelCase__ (self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
lowercase__ = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" )
lowercase__ = model(**_UpperCAmelCase )
lowercase__ = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowercase__ = logits.argmax(-1 ).item()
self.assertEqual(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
| 146 |
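A hedged end-to-end sketch of the slow integration test above; the checkpoint name, fixture path, and expected label are taken from the test itself:

import torch
from PIL import Image
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits          # shape (1, 1000): ImageNet classes
print(model.config.id2label[logits.argmax(-1).item()])  # expected: "tabby, tabby cat"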
from functools import lru_cache
def UpperCamelCase ( __magic_name__ : int ) -> set:
"""simple docstring"""
lowercase__ = 2
lowercase__ = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__magic_name__ )
if n > 1:
factors.add(__magic_name__ )
return factors
@lru_cache
def UpperCamelCase ( __magic_name__ : int ) -> int:
"""simple docstring"""
return len(unique_prime_factors(__magic_name__ ) )
def UpperCamelCase ( __magic_name__ : list ) -> bool:
"""simple docstring"""
return len(set(__magic_name__ ) ) in (0, 1)
def UpperCamelCase ( __magic_name__ : int ) -> list:
"""simple docstring"""
lowercase__ = 2
while True:
# Increment each value of a generated range
lowercase__ = [base + i for i in range(__magic_name__ )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
lowercase__ = [upf_len(__magic_name__ ) for x in group]
checker.append(__magic_name__ )
# If all numbers in the list are equal, return the group variable.
if equality(__magic_name__ ):
return group
# Increment our base variable by 1
base += 1
def UpperCamelCase ( __magic_name__ : int = 4 ) -> int:
"""simple docstring"""
lowercase__ = run(__magic_name__ )
return results[0] if len(__magic_name__ ) else None
if __name__ == "__main__":
print(solution())
| 146 | 1 |
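A de-obfuscated sketch of the search above (Project Euler 47 with the default n = 4): find the first run of n consecutive integers that each have exactly n distinct prime factors. Appending the target number to the per-group counts, as the row does before the all-equal check, is equivalent to requiring every count to equal n:

from functools import lru_cache

def unique_prime_factors(n: int) -> set:
    i, factors = 2, set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors

@lru_cache
def upf_len(n: int) -> int:
    return len(unique_prime_factors(n))

def solution(n: int = 4) -> int:
    base = 2
    while True:
        group = [base + i for i in range(n)]
        if all(upf_len(x) == n for x in group):
            return group[0]
        base += 1

print(solution())  # expected: 134043 (Project Euler 47)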