def solution(max_perimeter: int = 10**9) -> int:
    """
    Returns the sum of the perimeters of all almost equilateral triangles with
    integral side lengths and area, whose perimeters do not exceed max_perimeter.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
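# --- illustrative check (added; a minimal sketch, not part of the original file).
# The loop enumerates the almost equilateral Heronian triangles (5, 5, 6),
# (17, 17, 16), (65, 65, 66), (241, 241, 240), ... with perimeters 16, 50, 196, 722, ...
if __name__ == "__main__":
    assert solution(1_000) == 16 + 50 + 196 + 722  # the four perimeters up to 1000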
import math
def perfect_square(num: int) -> bool:
    """
    Check if a number is a perfect square using math.sqrt (float based, so it can
    misbehave for very large integers).
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is a perfect square using binary search over the integers.
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
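# --- illustrative example (added; a minimal sketch, not part of the original file).
# For large integers the float-based check can suffer rounding error, while the
# binary search stays exact because it only uses integer arithmetic.
if __name__ == "__main__":
    n = (10**8 + 3) ** 2  # a large perfect square
    print(perfect_square_binary_search(n))  # True: integer arithmetic is exact
    print(math.isqrt(n) ** 2 == n)          # True: the stdlib integer sqrt agrees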
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
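# --- illustrative example (added; a hedged sketch mirroring the slow integration
# test above; not runnable from this test module's relative-import context) ---
# model = MPNetModel.from_pretrained("microsoft/mpnet-base")
# input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
# with torch.no_grad():
#     last_hidden_state = model(input_ids)[0]  # shape: torch.Size([1, 11, 768])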
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # fill in each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
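# --- illustrative example (added; the kernel can be inspected without any image) ---
if __name__ == "__main__":
    demo_kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
    print(demo_kernel.shape)         # (11, 11): even sizes are bumped to the next odd value
    print(float(demo_kernel.max()))  # 1.0, the peak response at the kernel centre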
"""simple docstring"""
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
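# --- usage note (added) ---
# A scene like this is rendered from the command line with Manim Community, e.g.:
#   manim -pql stage_1.py Stage1
# (-p previews the result, -ql renders at low quality; file/scene names assumed)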
"""simple docstring"""
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
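# --- illustrative example (added; a minimal sketch of how such a pinned-version
# table is typically consumed, e.g. when assembling extras in setup.py) ---
def deps_list(*pkgs: str) -> list:
    """Look up the pinned requirement string for each requested package."""
    return [deps[pkg] for pkg in pkgs]


if __name__ == "__main__":
    print(deps_list("torch", "numpy"))  # ['torch>=1.4', 'numpy']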
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
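# --- illustrative example (added; a hedged sketch of how these re-exports are used) ---
# from transformers import AutoTokenizer
#
# args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
# tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
# train_dataset = GlueDataset(args, tokenizer=tokenizer)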
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
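# --- illustrative example (added; a minimal sketch, assuming a vocab.json that maps
# single characters to ids, which is what this character-level tokenizer expects) ---
# tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
# tokenizer._tokenize("mgp-str")  # ['m', 'g', 'p', '-', 's', 't', 'r']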
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def load_image(_):
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def a (self : List[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model=a__ , tokenizer=a__ , image_processor=a__ )
__snake_case = INVOICE_URL
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
__snake_case = '''What is the placebo?'''
__snake_case = [
{
'''image''': load_image(a__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def a (self : Union[str, Any] , a__ : Optional[int] , a__ : Dict ):
"""simple docstring"""
__snake_case = dqa_pipeline(a__ , top_k=2 )
self.assertEqual(
a__ , [
[
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
]
]
* 3 , )
@require_torch
@require_detectron2
@require_pytesseract
def a (self : Dict ):
"""simple docstring"""
__snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__snake_case = INVOICE_URL
__snake_case = '''How many cats are there?'''
__snake_case = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
# No text at all is detected in this image, meaning layoutlmv2 should fail.
# Empty answer probably
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(a__ , [] )
# We can optionally pass the words and bounding boxes directly
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = []
__snake_case = []
__snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 )
self.assertEqual(a__ , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def a (self : str ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Tuple ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Dict ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def a (self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def a (self : List[str] ):
"""simple docstring"""
pass
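# --- illustrative example (added; a hedged sketch mirroring the slow tests above) ---
# dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
# dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)
# # -> [{'score': ..., 'answer': 'us-001', 'start': 16, 'end': 16}]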
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : Optional[int] , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[int, float] = 1 / 2_55 , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : bool = True , **lowerCamelCase__ : Dict , ) ->None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = size if size is not None else {"shortest_edge": 2_24}
_UpperCAmelCase : List[Any] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
_UpperCAmelCase : Union[str, Any] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ , param_name="crop_size" )
_UpperCAmelCase : int = do_resize
_UpperCAmelCase : Tuple = size
_UpperCAmelCase : str = resample
_UpperCAmelCase : List[str] = do_center_crop
_UpperCAmelCase : int = crop_size
_UpperCAmelCase : Dict = do_rescale
_UpperCAmelCase : Any = rescale_factor
_UpperCAmelCase : Optional[int] = do_normalize
_UpperCAmelCase : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCAmelCase : str = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCAmelCase : Tuple = do_convert_rgb
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : List[Any] , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase : List[str] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCAmelCase : List[str] = get_resize_output_image_size(lowerCamelCase__ , size=size["shortest_edge"] , default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : str , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(lowerCamelCase__ , size=(size["height"], size["width"]) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[int, float] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : int , ) ->Any:
'''simple docstring'''
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Optional[Any] , ) ->np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : ImageInput , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : int = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : float = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowerCamelCase__ : List[str] , ) ->PIL.Image.Image:
'''simple docstring'''
_UpperCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : Optional[int] = size if size is not None else self.size
_UpperCAmelCase : Optional[Any] = get_size_dict(lowerCamelCase__ , param_name="size" , default_to_square=lowerCamelCase__ )
_UpperCAmelCase : Tuple = resample if resample is not None else self.resample
_UpperCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : int = get_size_dict(lowerCamelCase__ , param_name="crop_size" , default_to_square=lowerCamelCase__ )
_UpperCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Dict = image_std if image_std is not None else self.image_std
_UpperCAmelCase : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCAmelCase : Tuple = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCAmelCase : Optional[int] = [convert_to_rgb(lowerCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
_UpperCAmelCase : Tuple = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
_UpperCAmelCase : Tuple = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
_UpperCAmelCase : List[Any] = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
_UpperCAmelCase : int = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
_UpperCAmelCase : Any = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
_UpperCAmelCase : str = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
_UpperCAmelCase : str = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
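# --- illustrative example (added; a hedged sketch, not runnable standalone because
# of the relative imports above; this processor follows the CLIP preprocessing recipe) ---
# image_processor = CLIPImageProcessor(size={"shortest_edge": 224})
# inputs = image_processor(images=PIL.Image.new("RGB", (640, 480)), return_tensors="np")
# inputs["pixel_values"].shape  # (1, 3, 224, 224)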
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ) ->int:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ , repo_id="test-config" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase__ , repo_id="valid_org/test-config-org" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : int = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
CustomConfig.register_for_auto_class()
_UpperCAmelCase : int = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
_UpperCAmelCase : str = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = PretrainedConfig()
_UpperCAmelCase : Tuple = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
_UpperCAmelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase__ , lowerCamelCase__ )]
if len(lowerCamelCase__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F""" {', '.join(lowerCamelCase__ )}.""" )
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
_UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = mock.Mock()
_UpperCAmelCase : List[str] = 5_00
_UpperCAmelCase : Dict = {}
_UpperCAmelCase : Tuple = HTTPError
_UpperCAmelCase : Any = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCamelCase__ ) as mock_head:
_UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check ensures we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = AutoConfig.from_pretrained("bert-base-cased" )
_UpperCAmelCase : str = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : Dict = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_UpperCAmelCase : Dict = ["config.42.0.0.json"]
_UpperCAmelCase : Union[str, Any] = 7_68
configuration.save_pretrained(lowerCamelCase__ )
shutil.move(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , os.path.join(lowerCamelCase__ , "config.42.0.0.json" ) )
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
_UpperCAmelCase : Any = "v4.0.0"
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_UpperCAmelCase : List[Any] = "v3.0.0"
_UpperCAmelCase : int = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
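# --- illustrative example (added; a minimal sketch of update_from_string, the API
# exercised by the test above) ---
# c = GPT2Config()
# c.update_from_string("n_embd=100,resid_pdrop=0.2,scale_attn_weights=false,summary_type=foo")
# (c.n_embd, c.resid_pdrop, c.scale_attn_weights, c.summary_type)  # (100, 0.2, False, 'foo')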
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase__ = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
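# --- illustrative worked example (added) ---
# A box (left=50, top=100, right=200, bottom=300) on a 1000x2000-pixel page is
# rescaled to the 0-1000 coordinate range that LayoutLM-style models expect:
# normalize_box([50, 100, 200, 300], width=1000, height=2000)  # -> [50, 50, 200, 150]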
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = 1 / 2_5_5 , _A = True , _A = None , _A = None , _A = True , _A = None , _A = "" , **_A , ):
"""simple docstring"""
super().__init__(**_A )
__lowerCAmelCase = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
__lowerCAmelCase = get_size_dict(_A )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_value
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
__lowerCAmelCase = apply_ocr
__lowerCAmelCase = ocr_lang
__lowerCAmelCase = tesseract_config
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ):
"""simple docstring"""
__lowerCAmelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__lowerCAmelCase = (size["height"], size["width"])
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = None , **_A , ):
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A = None , **_A , ):
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, apply_ocr=None, ocr_lang=None, tesseract_config=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
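# A brief hedged usage sketch for the processor above; the class name is our
# reconstruction, and running OCR requires the pytesseract backend installed:
# processor = LayoutLMv3ImageProcessor(apply_ocr=True)
# encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
# -> encoding holds "pixel_values", plus "words" and "boxes" when apply_ocr is True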
| 92 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
"""simple docstring"""
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
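# Worked example using the function above, with approximate values for water:
# bulk modulus ~2.15e9 Pa and density ~1000 kg/m^3 give sqrt(2.15e6) ~ 1466 m/s,
# close to the commonly cited ~1480 m/s speed of sound in water.
print(speed_of_sound_in_a_fluid(density=1_000, bulk_modulus=2.15e9))  # ~1466.3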
| 258 | 0 |
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name)
    def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None, weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None):
    """simple docstring"""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking)
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
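# A brief hedged usage sketch of the helpers restored above; the hyperparameter
# values are illustrative, not taken from the original file.
example_optimizer, example_lr_schedule = create_optimizer(
    init_lr=5e-5,            # peak learning rate reached after warmup
    num_train_steps=10_000,
    num_warmup_steps=1_000,
    weight_decay_rate=0.01,  # > 0.0 selects AdamWeightDecay over plain Adam
)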
| 354 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
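# A brief hedged usage sketch (requires the sentencepiece model and dict.txt that
# ship with "vinai/bartpho-syllable"; kept commented out since it downloads files):
# tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
# encoding = tokenizer("Chúng tôi là những nghiên cứu viên.")  # illustrative input
# print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))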
| 234 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
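# A short usage example for the stack above (added for illustration):
example_stack = LinkedStack[int]()
example_stack.push(1)
example_stack.push(2)
assert example_stack.pop() == 2   # LIFO: the last pushed item comes out first
assert example_stack.peek() == 1  # peek leaves the element on the stack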
| 117 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    '''simple docstring'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_0_0_0) -> int:
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
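# Worked example: 144 is the first Fibonacci term with 3 digits, and it is the
# 12th term of the sequence, so solution(3) == 12. For the default n = 1000 the
# result is 4782, the Project Euler 25 answer.
assert solution(3) == 12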
| 329 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree)-> List[Tuple[int, ...]]:
    """simple docstring"""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('Not supported')
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims)-> Tuple[int, ...]:
    """simple docstring"""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None)-> List[Tuple[slice, ...]]:
    """simple docstring"""
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]
    slices = []
    path_list = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break
    path = tuple(path_list)
    divergence_idx = len(path_list)
    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]]))

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :]))

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())
    return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims)-> torch.Tensor:
    """simple docstring"""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims)
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False)-> Any:
    """simple docstring"""
    if not (len(inputs) > 0):
        raise ValueError('Must provide at least one input')
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims))
        chunks = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported')
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class ChunkSizeTuner:
    '''simple docstring'''
    def __init__(self, max_chunk_size=5_12):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        logging.info('Tuning chunk size...')
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac, ac_prev):
        consistent = True
        for a1, a2 in zip(ac, ac_prev):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size):
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size)
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 267 |
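# A minimal hedged illustration of the batch-chunking idea implemented above:
# apply a function to slices of the leading dimension and stitch the results,
# bounding peak memory at the cost of extra calls (names here are ours).
import torch

def chunked_apply(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    # process `x` in chunks along dim 0, then concatenate the partial outputs
    return torch.cat([fn(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)], dim=0)

assert torch.equal(chunked_apply(lambda t: t * 2, torch.arange(10.0), chunk_size=4), torch.arange(10.0) * 2)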
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 267 | 1 |
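# A brief hedged note on the lazy-module pattern above: the heavy modeling code
# is imported only when one of its symbols is first accessed, e.g.:
# from transformers import ViTMSNForImageClassification  # triggers the lazy load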
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=3, image_size=3_2, num_channels=3, embeddings_size=1_0, hidden_sizes=[1_0, 2_0, 3_0, 4_0], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2), )
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
    def test_for_image_classification(self):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
    def test_model_common_attributes(self):
pass
    def test_forward_signature(self):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_UpperCAmelCase )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
    def test_hidden_states_output(self):
def check_hidden_states_output(UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[Any] ):
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ), expected_num_stages + 1 )
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
    def test_jit_compilation(self):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
__lowercase = model_class(_UpperCAmelCase )
@jax.jit
def model_jitted(UpperCAmelCase__ : List[Any], **UpperCAmelCase__ : str ):
return model(pixel_values=_UpperCAmelCase, **_UpperCAmelCase )
with self.subTest("JIT Enabled" ):
__lowercase = model_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = model_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase, _UpperCAmelCase ):
self.assertEqual(jitted_output.shape, output.shape )
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1_0_0_0)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4_180, -1.5_051, -3.4_836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
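# A brief hedged sketch of running the same checkpoint outside the test harness
# (requires downloading "facebook/regnet-y-040"; mirrors the integration test):
# image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# logits = model(**image_processor(images=prepare_img(), return_tensors="np")).logits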
| 17 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info('Unable to verify checksums.')
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ' for ' + verification_name if verification_name is not None else ''
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error')
    logger.info('All the checksums matched successfully' + for_verification_name)


class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.')
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info('All the splits matched successfully.')


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: Optional[int]) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
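# A brief hedged usage sketch of the helpers above (the URL and values are illustrative):
expected = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
recorded = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
verify_checksums(expected, recorded)  # logs success; raises NonMatchingChecksumError on mismatch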
| 325 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, do_center_crop=True, crop_size=None):
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
self.assertTrue(hasattr(__snake_case , '''do_center_crop''' ) )
self.assertTrue(hasattr(__snake_case , '''crop_size''' ) )
    def test_image_processor_from_dict_with_kwargs(self):
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 177 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('''Not supported''')
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start: Sequence[int], end: Sequence[int], dims: Sequence[int], start_edges: Optional[Sequence[bool]] = None, end_edges: Optional[Sequence[bool]] = None) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]
    slices = []
    path_list = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break
    path = tuple(path_list)
    divergence_idx = len(path_list)
    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]]))

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :]))

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())
    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims)
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False) -> Any:
    if not (len(inputs) > 0):
        raise ValueError('''Not supported''' if False else '''Must provide at least one input''')
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims))
        chunks = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('''Not supported''')
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class ChunkSizeTuner:
    '''simple docstring'''
    def __init__(self, max_chunk_size: int = 5_12):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int):
        logging.info('''Tuning chunk size...''')
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac: Iterable, ac_prev: Iterable):
        consistent = True
        for a1, a2 in zip(ac, ac_prev):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int):
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size)
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 177 | 1 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 78 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
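# Example invocation of the script above (the script name and paths are illustrative):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin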
| 87 | 0 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 10_00,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 5_00, 9_99]:
            for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
def a ( self : Optional[int] ):
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__UpperCAmelCase = scheduler_class(**_lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_549_625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_994_987 ) ) < 1E-5
def a ( self : int ):
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config(variance_type='''learned_range''' )
__UpperCAmelCase = scheduler_class(**_lowercase )
__UpperCAmelCase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowercase ) - -10.1_712_790 < 1E-5
assert scheduler._get_variance(4_87 , predicted_variance=_lowercase ) - -5.7_998_052 < 1E-5
assert scheduler._get_variance(9_99 , predicted_variance=_lowercase ) - -0.0_010_011 < 1E-5
def a ( self : Any ):
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**_lowercase )
__UpperCAmelCase = scheduler.timesteps
__UpperCAmelCase = self.dummy_model()
__UpperCAmelCase = self.dummy_sample_deter
__UpperCAmelCase = torch.manual_seed(0 )
for i, t in enumerate(_lowercase ):
# 1. predict noise residual
__UpperCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
__UpperCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
__UpperCAmelCase = pred_prev_sample
__UpperCAmelCase = torch.sum(torch.abs(_lowercase ) )
__UpperCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
def a ( self : Any ):
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**_lowercase )
scheduler.set_timesteps(25 )
__UpperCAmelCase = scheduler.timesteps
__UpperCAmelCase = self.dummy_model()
__UpperCAmelCase = self.dummy_sample_deter
__UpperCAmelCase = torch.manual_seed(0 )
for i, t in enumerate(_lowercase ):
# 1. predict noise residual
__UpperCAmelCase = model(_lowercase , _lowercase )
if i + 1 == timesteps.shape[0]:
__UpperCAmelCase = None
else:
__UpperCAmelCase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__UpperCAmelCase = scheduler.step(
_lowercase , _lowercase , _lowercase , prev_timestep=_lowercase , generator=_lowercase ).prev_sample
__UpperCAmelCase = pred_prev_sample
__UpperCAmelCase = torch.sum(torch.abs(_lowercase ) )
__UpperCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
def a ( self : List[str] ):
pass
def a ( self : int ):
pass
| 86 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : int = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 86 | 1 |
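The ONNX config above encodes BLOOM's inverted key/value cache layout: keys are stored as (batch * n_head, head_dim, past_len) while values are (batch * n_head, past_len, head_dim). A quick standalone shape check of that convention (a sketch, independent of transformers):

import torch

batch, n_head, head_dim, past_len = 2, 8, 8, 5
past_key = torch.zeros(batch * n_head, head_dim, past_len)
past_value = torch.zeros(batch * n_head, past_len, head_dim)
# Keys are laid out transposed relative to values, so the usual attention
# products compose without extra transposes:
assert (past_key.transpose(1, 2) @ past_key).shape == (batch * n_head, past_len, past_len)
assert (past_key @ past_value).shape == (batch * n_head, head_dim, head_dim)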
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : int = logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 98 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158 | 0 |
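The import shim above follows the lazy-loading pattern: exported names are declared up front and only resolved on first attribute access. A stripped-down sketch of the idea (the real _LazyModule additionally handles __spec__, dir(), and copying/pickling):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        submodule = self._symbol_to_submodule.get(symbol)
        if submodule is None:
            raise AttributeError(f"module {self.__name__} has no attribute {symbol}")
        # Import the defining submodule only now, on first access.
        module = importlib.import_module(f".{submodule}", self.__name__)
        return getattr(module, symbol)

# Usage sketch: sys.modules[__name__] = LazyModule(__name__, {"modeling_wavlm": ["WavLMModel"]})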
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"

def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)

def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir

class TestAll(TestCasePlus):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def A_ ( self , lowercase ):
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(lowercase_ )
_lowerCamelCase : Optional[int] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCamelCase : int = max(len(tokenizer.encode(lowercase_ ) ) for a in ARTICLES )
_lowerCamelCase : Tuple = max(len(tokenizer.encode(lowercase_ ) ) for a in SUMMARIES )
_lowerCamelCase : Optional[Any] = 4
_lowerCamelCase : Union[str, Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_lowerCamelCase, _lowerCamelCase : Any = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
_lowerCamelCase : List[str] = SeqaSeqDataset(
lowercase_ , data_dir=lowercase_ , type_path='train' , max_source_length=lowercase_ , max_target_length=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ , )
_lowerCamelCase : List[str] = DataLoader(lowercase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowercase_ , lowercase_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_lowerCamelCase : Tuple = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def A_ ( self , lowercase ):
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained(lowercase_ )
_lowerCamelCase : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCamelCase : List[str] = max(len(tokenizer.encode(lowercase_ ) ) for a in ARTICLES )
_lowerCamelCase : Dict = max(len(tokenizer.encode(lowercase_ ) ) for a in SUMMARIES )
_lowerCamelCase : Dict = 4
_lowerCamelCase : Optional[Any] = LegacySeqaSeqDataset(
lowercase_ , data_dir=lowercase_ , type_path='train' , max_source_length=20 , max_target_length=lowercase_ , )
_lowerCamelCase : str = DataLoader(lowercase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def A_ ( self ):
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_lowerCamelCase : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_lowerCamelCase : int = tmp_dir.joinpath('train.source' ).open().readlines()
_lowerCamelCase : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowercase_ , lowercase_ , 128 , lowercase_ )
_lowerCamelCase : Any = {x.name for x in tmp_dir.iterdir()}
_lowerCamelCase : int = {x.name for x in save_dir.iterdir()}
_lowerCamelCase : Any = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowercase_ ) < len(lowercase_ )
assert len(lowercase_ ) == 1
assert len(packed_examples[0] ) == sum(len(lowercase_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def A_ ( self ):
if not FAIRSEQ_AVAILABLE:
return
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_dataset(max_len=64 )
_lowerCamelCase : Any = 64
_lowerCamelCase : Tuple = ds.make_dynamic_sampler(lowercase_ , required_batch_size_multiple=lowercase_ )
_lowerCamelCase : Dict = [len(lowercase_ ) for x in batch_sampler]
assert len(set(lowercase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowercase_ ) == len(lowercase_ ) # no dropped or added examples
_lowerCamelCase : Optional[Any] = DataLoader(lowercase_ , batch_sampler=lowercase_ , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCamelCase : Dict = []
_lowerCamelCase : Any = []
for batch in data_loader:
_lowerCamelCase : Dict = batch['input_ids'].shape
_lowerCamelCase : List[Any] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_lowerCamelCase : Union[str, Any] = np.product(batch['input_ids'].shape )
num_src_per_batch.append(lowercase_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowercase_ )
assert num_src_per_batch[0] == max(lowercase_ )
if failures:
raise AssertionError(F'''too many tokens in {len(lowercase_ )} batches''' )
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = self._get_dataset(max_len=512 )
_lowerCamelCase : Tuple = 2
_lowerCamelCase : Any = ds.make_sortish_sampler(lowercase_ , shuffle=lowercase_ )
_lowerCamelCase : List[Any] = DataLoader(lowercase_ , batch_size=lowercase_ , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCamelCase : Any = DataLoader(lowercase_ , batch_size=lowercase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowercase_ )
_lowerCamelCase : Optional[int] = tokenizer.pad_token_id
def count_pad_tokens(lowercase , lowercase="input_ids" ):
return [batch[k].eq(lowercase_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(lowercase_ , k='labels' ) ) < sum(count_pad_tokens(lowercase_ , k='labels' ) )
assert sum(count_pad_tokens(lowercase_ ) ) < sum(count_pad_tokens(lowercase_ ) )
assert len(lowercase_ ) == len(lowercase_ )
def A_ ( self , lowercase=1000 , lowercase=128 ):
if os.getenv('USE_REAL_DATA' , lowercase_ ):
_lowerCamelCase : str = 'examples/seq2seq/wmt_en_ro'
_lowerCamelCase : Dict = max_len * 2 * 64
if not Path(lowercase_ ).joinpath('train.len' ).exists():
save_len_file(lowercase_ , lowercase_ )
else:
_lowerCamelCase : List[Any] = 'examples/seq2seq/test_data/wmt_en_ro'
_lowerCamelCase : List[str] = max_len * 4
save_len_file(lowercase_ , lowercase_ )
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ )
_lowerCamelCase : Union[str, Any] = SeqaSeqDataset(
lowercase_ , data_dir=lowercase_ , type_path='train' , max_source_length=lowercase_ , max_target_length=lowercase_ , n_obs=lowercase_ , )
return ds, max_tokens, tokenizer
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_dataset()
_lowerCamelCase : Any = set(DistributedSortishSampler(lowercase_ , 256 , num_replicas=2 , rank=0 , add_extra_examples=lowercase_ ) )
_lowerCamelCase : List[Any] = set(DistributedSortishSampler(lowercase_ , 256 , num_replicas=2 , rank=1 , add_extra_examples=lowercase_ ) )
assert idsa.intersection(lowercase_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def A_ ( self , lowercase ):
_lowerCamelCase : Any = AutoTokenizer.from_pretrained(lowercase_ , use_fast=lowercase_ )
if tok_name == MBART_TINY:
_lowerCamelCase : Tuple = SeqaSeqDataset(
lowercase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
_lowerCamelCase : str = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_lowerCamelCase : Optional[int] = SeqaSeqDataset(
lowercase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
_lowerCamelCase : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(lowercase_ ) == 1 if tok_name == BART_TINY else len(lowercase_ ) == 0 | 355 |
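One trick the tests above exercise deserves spelling out: a "sortish" sampler shuffles the dataset globally but sorts by length within largish chunks, so each batch is roughly length-homogeneous (little padding wasted) while the overall order stays random. A minimal sketch of that index ordering (assumed semantics; the real sampler differs in details):

import random

def sortish_indices(lengths, batch_size, chunk_mult=50):
    # Shuffle globally, then sort by length inside each chunk of
    # batch_size * chunk_mult examples.
    indices = list(range(len(lengths)))
    random.shuffle(indices)
    chunk = batch_size * chunk_mult
    out = []
    for start in range(0, len(indices), chunk):
        block = indices[start : start + chunk]
        block.sort(key=lambda i: lengths[i], reverse=True)  # longest first
        out.extend(block)
    return out

lengths = [random.randint(1, 100) for _ in range(1000)]
order = sortish_indices(lengths, batch_size=8)
assert sorted(order) == list(range(1000))  # a permutation: nothing dropped or added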
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
args = parser.parse_args()
pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 12 | 0 |
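A hypothetical invocation of the conversion script above for a v1.x checkpoint (the script filename and paths are placeholders):

# python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./sd-v1-4.ckpt \
#     --original_config_file ./v1-inference.yaml \
#     --scheduler_type pndm \
#     --dump_path ./sd-v1-4-diffusers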
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names

def add_arguments(parser):
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")

def finish_calibration(model, args):
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    def fusea(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)

def clip_gelu(model, maxval):
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")

def expand_amax(model):
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")

def recalibrate_weights(model):
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")

def print_quant_summary(model):
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")

def set_quantizer(name, mod, quantizer, k, v):
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")

def set_quantizers(name, mod, which="both", **kwargs):
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)

def set_quantizer_by_name(model, names, **kwargs):
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
| 23 |
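The calibrate-then-quantize lifecycle implemented above can be exercised end to end on a tiny model. This sketch assumes pytorch-quantization is installed and uses only calls that already appear in the file:

import torch
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor

quant_nn.QuantLinear.set_default_quant_desc_input(QuantDescriptor(num_bits=8, calib_method="histogram"))
model = torch.nn.Sequential(quant_nn.QuantLinear(16, 16))

# Phase 1: collect activation statistics instead of quantizing.
for _, module in model.named_modules():
    if isinstance(module, pytorch_quantization.nn.TensorQuantizer) and module._calibrator is not None:
        module.disable_quant()
        module.enable_calib()
with torch.no_grad():
    for _ in range(8):
        model(torch.randn(4, 16))

# Phase 2: freeze the collected ranges and re-enable quantization.
for _, module in model.named_modules():
    if isinstance(module, pytorch_quantization.nn.TensorQuantizer) and module._calibrator is not None:
        if isinstance(module._calibrator, calib.MaxCalibrator):
            module.load_calib_amax()
        else:
            module.load_calib_amax("percentile", percentile=99.99)
        module.enable_quant()
        module.disable_calib()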
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 279 | 0 |
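The toy vocab/merges fixture above exercises byte-pair encoding: a word is split into characters (the last one carrying an end-of-word marker) and adjacent pairs are merged greedily in the priority order of the merges file, which is why "lower" tokenizes to ["low", "er</w>"]. A minimal sketch of that merge loop (simplified ranking, no caching):

def bpe(word, merge_ranks):
    # Split into symbols with an end-of-word marker, mirroring the fixture above.
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        # Pick the adjacent pair with the best (lowest) merge rank, if any.
        best = min(pairs, key=lambda p: merge_ranks.get(p, float("inf")))
        if best not in merge_ranks:
            break
        i = pairs.index(best)
        symbols[i : i + 2] = [best[0] + best[1]]
    return symbols

ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2}
assert bpe("lower", ranks) == ["low", "er</w>"]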
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output

def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )

def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 45 |
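The magic constants in atbash_slow fall out of ASCII arithmetic: ord("A") + ord("Z") = 65 + 90 = 155, so chr(155 - c) mirrors the uppercase alphabet, and 97 + 122 = 219 does the same for lowercase. A quick property check (Atbash is its own inverse):

import string

assert ord("A") + ord("Z") == 155 and ord("a") + ord("z") == 219
# Applying the mirror twice returns the original character.
for c in string.ascii_letters:
    mirrored = chr(155 - ord(c)) if c.isupper() else chr(219 - ord(c))
    back = chr(155 - ord(mirrored)) if mirrored.isupper() else chr(219 - ord(mirrored))
    assert back == c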
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
@slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 45 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 184 |
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r

def longest_increasing_subsequence_length(v):
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a fresh length-1 subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] tightens the tail of some shorter subsequence.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 178 | 0 |
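To see why the O(n log n) routine above works, trace the tail array: tail[k] holds the smallest value that can end an increasing subsequence of length k + 1, and each element either extends the longest subsequence or tightens one tail via the binary search. A worked run on a classic input:

v = [2, 5, 3, 7, 11, 8, 10, 13, 6]
# tail evolves as:
#  2 -> [2]
#  5 -> [2, 5]
#  3 -> [2, 3]               (tightens tail[1])
#  7 -> [2, 3, 7]
# 11 -> [2, 3, 7, 11]
#  8 -> [2, 3, 7, 8]         (tightens tail[3])
# 10 -> [2, 3, 7, 8, 10]
# 13 -> [2, 3, 7, 8, 10, 13]
#  6 -> [2, 3, 6, 8, 10, 13] (tightens tail[2])
assert longest_increasing_subsequence_length(v) == 6  # e.g. 2, 3, 7, 8, 10, 13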
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 254 |
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}

def topology_sort(graph, vert, visited):
    """Return the verts of graph reachable from vert in order of finishing time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order

def find_components(reversed_graph, vert, visited):
    """Collect the component of vert in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component

def strongly_connected_components(graph):
    """Kosaraju's algorithm: two DFS passes over graph and its reverse."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
| 254 | 1 |
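Kosaraju's algorithm above makes two DFS passes: one over the original graph to get a finishing order, one over the reversed graph peeling components off in reverse finish order. On the two sample graphs it yields:

# test_graph_1: the cycle 0 -> 2 -> 1 -> 0 is one component; 3 and 4 are singletons.
assert strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]]

# test_graph_2 contains two 3-cycles: 0 -> 1 -> 2 -> 0 and 3 -> 4 -> 5 -> 3.
assert strongly_connected_components(test_graph_2) == [[0, 2, 1], [3, 5, 4]]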
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 31 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 31 | 1 |
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # R(k+1) = 10 * R(k) + 1, kept reduced mod divisor
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index

def solution(limit: int = 1000000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"{solution() = }")
| 218 |
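least_divisible_repunit computes A(n) with the recurrence R(k+1) = 10·R(k) + 1 taken mod n, so the repunit itself never grows large; the loop ends when the running remainder hits 0. Project Euler's stated examples give a quick sanity check:

# A(7) = 6 because 111111 = 7 * 15873, and A(41) = 5 because 11111 = 41 * 271.
assert least_divisible_repunit(7) == 6
assert least_divisible_repunit(41) == 5
assert 111111 % 7 == 0 and 11111 % 41 == 0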
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt20-comet-da` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf''',
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
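# Hedged usage sketch (my addition; needs the optional `unbabel-comet` package
# and downloads a model, so it is left commented out):
# comet_metric = datasets.load_metric("comet", "wmt20-comet-da")
# results = comet_metric.compute(
#     sources=["Dem Feuer konnte Einhalt geboten werden"],
#     predictions=["The fire could be stopped"],
#     references=["They were able to control the fire."],
# )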
| 218 | 1 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """A circular queue implemented over a fixed-capacity doubly linked list."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
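# Hedged usage sketch (my addition, not part of the original file): a
# capacity-2 ring behaves FIFO until full.
_queue = CircularQueueLinkedList(2)
_queue.enqueue("a")
_queue.enqueue("b")
assert _queue.dequeue() == "a"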
| 142 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build the UperNetConfig matching the requested ConvNext backbone size."""
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    """Build the (old key, new key) rename pairs for the UperNet/ConvNext state dict."""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation UperNet + ConvNext checkpoint to the HF format and verify it."""
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
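# Hedged CLI sketch (my addition; the script file name is hypothetical):
#   python convert_upernet_convnext_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny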
| 142 | 1 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02]):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Any = scheduler_class(**lowercase_)
scheduler.set_timesteps(self.num_inference_steps)
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : str = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ : str = sample.to(lowercase_)
for i, t in enumerate(scheduler.timesteps):
SCREAMING_SNAKE_CASE_ : str = scheduler.scale_model_input(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE_ : Any = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 10.08_07) < 1e-2
assert abs(result_mean.item() - 0.01_31) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config(prediction_type='''v_prediction''')
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_)
scheduler.set_timesteps(self.num_inference_steps)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_model()
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ : Optional[Any] = sample.to(lowercase_)
for i, t in enumerate(scheduler.timesteps):
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.scale_model_input(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = output.prev_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Tuple = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 0.00_02) < 1e-2
assert abs(result_mean.item() - 2.2_676e-06) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_)
scheduler.set_timesteps(self.num_inference_steps , device=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Any = self.dummy_model()
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE_ : List[Any] = sample.to(lowercase_)
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.scale_model_input(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE_ : Any = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Dict = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 10.08_07) < 1e-2
assert abs(result_mean.item() - 0.01_31) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : List[str] = scheduler_class(**lowercase_ , use_karras_sigmas=lowercase_)
scheduler.set_timesteps(self.num_inference_steps , device=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE_ : int = sample.to(lowercase_)
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.scale_model_input(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : int = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19) < 1e-2
assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63) < 1e-3
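# Hedged standalone sketch of what these tests exercise (my addition; commented
# out because it instantiates a real scheduler object):
# scheduler = EulerDiscreteScheduler(num_train_timesteps=1100, beta_schedule="linear")
# scheduler.set_timesteps(10)
# print(scheduler.timesteps)  # ten descending timesteps over [0, 1099]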
| 318 |
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str, second_str):
    """
    Two strings are anagrams if they contain the same letters, ignoring case and spaces.
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings, adjust its count:
    # +1 for the first string, -1 for the second
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
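# Hedged examples (my addition): case and spaces are ignored.
# check_anagrams("Silent", "Listen")  -> True
# check_anagrams("There", "Their")    -> False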
| 318 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
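if __name__ == "__main__":
    # Hedged sanity checks (my addition, not part of the original module):
    _x = tf.zeros((2, 3, 4))
    assert shape_list(_x) == [2, 3, 4]
    assert shape_list(flatten(_x, start_dim=1)) == [2, 12]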
| 259 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "git_vision_model"
    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "git"
    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
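# Hedged usage sketch (my addition): a default-constructed config mirrors the
# microsoft/git-base architecture.
# config = GitConfig()
# assert config.vision_config.hidden_size == 768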
| 259 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 159 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"
def __init__( self : int , _UpperCAmelCase : Optional[int]=3_0522 , _UpperCAmelCase : Union[str, Any]=768 , _UpperCAmelCase : str=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Any=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : str=1E-12 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any="none" , **_UpperCAmelCase : Optional[int] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = type_vocab_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : str = position_embedding_type
_lowerCAmelCase : int = quant_mode
_lowerCAmelCase : str = force_dequant
class IBertOnnxConfig(OnnxConfig):
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 159 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }
def __init__( self , lowercase , lowercase=None ) -> Optional[int]:
super().__init__(lowercase )
__UpperCamelCase = speaker_embeddings
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ) -> List[Any]:
if speaker_embeddings_dict_path is not None:
__UpperCamelCase = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
                    f"`{os.path.join(lowercase , lowercase )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
__UpperCamelCase = None
else:
with open(lowercase ) as speaker_embeddings_json:
__UpperCamelCase = json.load(lowercase )
else:
__UpperCamelCase = None
__UpperCamelCase = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def __lowerCamelCase ( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ) -> Any:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
__UpperCamelCase = {}
__UpperCamelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__UpperCamelCase = self._load_voice_preset(lowercase )
__UpperCamelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=lowercase , )
__UpperCamelCase = os.path.join(lowercase , f"{prompt_key}_{key}.npy" )
__UpperCamelCase = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase = None , **lowercase ) -> Optional[int]:
__UpperCamelCase = self.speaker_embeddings[voice_preset]
__UpperCamelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
__UpperCamelCase = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if path is None:
raise ValueError(
                f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
__UpperCamelCase = np.load(lowercase )
return voice_preset_dict
def __lowerCamelCase ( self , lowercase = None ) -> Optional[Any]:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=2_5_6 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ) -> Dict:
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__UpperCamelCase = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
__UpperCamelCase = voice_preset + """.npz"""
__UpperCamelCase = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
__UpperCamelCase = BatchFeature(data=lowercase , tensor_type=lowercase )
__UpperCamelCase = self.tokenizer(
lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
__UpperCamelCase = voice_preset
return encoded_text
| 349 |
'''simple docstring'''
def is_sum_subset(arr, required_sum):
    """
    Return True if some subset of ``arr`` sums to ``required_sum`` (bottom-up dynamic programming).
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
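# Hedged examples (my addition): 9 = 4 + 5 is reachable, 30 is not.
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)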
| 349 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: list = [3, 3, 5, 3, 5, 5, 3],
        in_channels: list = [32, 16, 24, 40, 80, 112, 192],
        out_channels: list = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: list = [],
        strides: list = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: list = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: list = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 362 | import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Remove duplicate entries from the model ToC and sort models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
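# Hedged CLI sketch (my addition; the utility file name is an assumption):
#   python utils/check_doc_toc.py --fix_and_overwrite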
| 119 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 327 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = self.dummy_sample_deter + 0.1
__a = self.dummy_sample_deter - 0.1
__a = samplea.shape[0]
__a = torch.stack([samplea, samplea, samplea] , dim=0)
__a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE)
__a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
__a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 11_53.18_33) < 1E-2
assert abs(result_mean.item() - 0.50_05) < 1E-3
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_58.96_06) < 1E-2
assert abs(result_mean.item() - 0.33_72) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''')
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_02.02_96) < 1E-2
assert abs(result_mean.item() - 0.26_31) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
__a = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE):
if i == len(__SCREAMING_SNAKE_CASE) - 1:
__a = -1
else:
__a = timesteps[i + 1]
__a = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE)
__a = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
__a = len(__SCREAMING_SNAKE_CASE)
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
| 49 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowercase : Any = {}
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = super().add_tokens(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
''' `placeholder_token` that is not already in the tokenizer.''' )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=1 , **SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = []
if num_vec_per_token == 1:
self.try_adding_tokens(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
output.append(SCREAMING_SNAKE_CASE__ )
else:
lowercase : Optional[int] = []
for i in range(SCREAMING_SNAKE_CASE__ ):
lowercase : int = placeholder_token + f"""_{i}"""
self.try_adding_tokens(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
output.append(SCREAMING_SNAKE_CASE__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""" )
lowercase : Any = output
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1.0 ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=SCREAMING_SNAKE_CASE__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowercase : List[Any] = self.token_map[placeholder_token]
lowercase : Dict = tokens[: 1 + int(len(SCREAMING_SNAKE_CASE__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowercase : List[Any] = copy.copy(SCREAMING_SNAKE_CASE__ )
random.shuffle(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = text.replace(SCREAMING_SNAKE_CASE__ , ''' '''.join(SCREAMING_SNAKE_CASE__ ) )
return text
def __call__( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1.0 , **SCREAMING_SNAKE_CASE__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
SCREAMING_SNAKE_CASE__ , vector_shuffle=SCREAMING_SNAKE_CASE__ , prop_tokens_to_load=SCREAMING_SNAKE_CASE__ ) , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1.0 , **SCREAMING_SNAKE_CASE__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
SCREAMING_SNAKE_CASE__ , vector_shuffle=SCREAMING_SNAKE_CASE__ , prop_tokens_to_load=SCREAMING_SNAKE_CASE__ ) , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
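# --- Hedged usage sketch (not part of the original sample) ---
# The class above appears to be a multi-vector textual-inversion tokenizer; the
# class name `MultiTokenCLIPTokenizer` and the method name `add_placeholder_tokens`
# are assumptions recovered from the `try_adding_tokens` / `token_map` calls in the body.
#
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
# tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
# # "<cat-toy>" expands to "<cat-toy>_0 ... <cat-toy>_3" before encoding:
# inputs = tokenizer("a photo of <cat-toy>", vector_shuffle=True, return_tensors="pt")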
| 359 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Tuple = 'pegasus'
A : int = ['past_key_values']
A : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , SCREAMING_SNAKE_CASE__=50265 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=4096 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=4096 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , **SCREAMING_SNAKE_CASE__ , ):
lowercase : List[Any] = vocab_size
lowercase : List[Any] = max_position_embeddings
lowercase : Dict = d_model
lowercase : Optional[Any] = encoder_ffn_dim
lowercase : int = encoder_layers
lowercase : str = encoder_attention_heads
lowercase : Tuple = decoder_ffn_dim
lowercase : List[str] = decoder_layers
lowercase : List[Any] = decoder_attention_heads
lowercase : Tuple = dropout
lowercase : int = attention_dropout
lowercase : Optional[Any] = activation_dropout
lowercase : Dict = activation_function
lowercase : Optional[Any] = init_std
lowercase : Tuple = encoder_layerdrop
lowercase : Optional[int] = decoder_layerdrop
lowercase : List[Any] = use_cache
lowercase : Any = encoder_layers
lowercase : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , forced_eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
@property
def __lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self ):
return self.d_model
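# Hedged sketch (assumes the class above is transformers' PegasusConfig): the
# attribute_map declared on the class aliases `hidden_size` to `d_model` and
# `num_attention_heads` to `encoder_attention_heads`, as the two properties show.
from transformers import PegasusConfig

pegasus_cfg = PegasusConfig(d_model=512, encoder_attention_heads=8)
assert pegasus_cfg.hidden_size == 512
assert pegasus_cfg.num_attention_heads == 8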
| 173 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__lowerCAmelCase : List[str] = None
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[str] = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
__lowerCAmelCase : str = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
__lowerCAmelCase : int = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : Optional[int] = MBartTokenizer
SCREAMING_SNAKE_CASE_ : List[int] = []
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : str , __lowerCamelCase : List[str]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Tuple="</s>" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : List[str]="<mask>" , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Dict , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
a = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
a = src_lang if src_lang is not None else "en_XX"
a = self.convert_tokens_to_ids(self._src_lang )
a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self : Dict ) -> str:
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str ) -> None:
a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] , __lowerCamelCase : Optional[str] , **__lowerCamelCase : List[Any] ) -> Tuple:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
a = src_lang
a = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
a = self.convert_tokens_to_ids(__lowerCamelCase )
a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : str = "en_XX" , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : str = "ro_RO" , **__lowerCamelCase : str , ) -> BatchEncoding:
a = src_lang
a = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[str] ) -> None:
a = self.convert_tokens_to_ids(__lowerCamelCase )
a = []
a = [self.eos_token_id, self.cur_lang_code]
a = self.convert_ids_to_tokens(self.prefix_tokens )
a = self.convert_ids_to_tokens(self.suffix_tokens )
a = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : str ) -> None:
a = self.convert_tokens_to_ids(__lowerCamelCase )
a = []
a = [self.eos_token_id, self.cur_lang_code]
a = self.convert_ids_to_tokens(self.prefix_tokens )
a = self.convert_ids_to_tokens(self.suffix_tokens )
a = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
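# Hedged usage sketch (assumes the class above is transformers' MBartTokenizerFast):
# the src_lang/tgt_lang machinery above selects which language-code token is
# appended after </s> in the post-processor templates.
from transformers import MBartTokenizerFast

mbart_tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = mbart_tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
# batch["input_ids"] ends with the </s> token followed by the en_XX code token.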
| 107 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __magic_name__ ( A : Tuple, A : List[Any], A : List[Any], A : Dict ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def __magic_name__ ( A : List[Any], A : int, A : Optional[Any], A : Optional[int], A : Any=True ):
'''simple docstring'''
model.train()
a = model(A )
a = F.mse_loss(A, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(A )
def __magic_name__ ( A : Any, A : Any=False ):
'''simple docstring'''
set_seed(42 )
a = RegressionModel()
a = deepcopy(A )
a = RegressionDataset(length=80 )
a = DataLoader(A, batch_size=16 )
model.to(accelerator.device )
if sched:
a = AdamW(params=model.parameters(), lr=1E-3 )
a = AdamW(params=ddp_model.parameters(), lr=1E-3 )
a = LambdaLR(A, lr_lambda=lambda A : epoch**0.65 )
a = LambdaLR(A, lr_lambda=lambda A : epoch**0.65 )
# Make a copy of `model`
if sched:
a , a , a , a = accelerator.prepare(A, A, A, A )
else:
a , a = accelerator.prepare(A, A )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a , a , a = get_training_setup(A )
# Use a single batch
a , a = next(iter(A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A, A, A, A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(A ):
step_model(A, A, A, A )
else:
# Sync grads
step_model(A, A, A, A )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(A, A, A, A )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a = ddp_input[torch.randperm(len(A ) )]
def __magic_name__ ( A : Optional[int] ):
'''simple docstring'''
a , a , a = get_training_setup(A )
# Use a single batch
a , a = next(iter(A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A, A, A, A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(A ):
step_model(A, A, A, A )
else:
# Sync grads
step_model(A, A, A, A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a = ddp_input[torch.randperm(len(A ) )]
def __magic_name__ ( A : List[Any]=False, A : List[Any]=False ):
'''simple docstring'''
a = Accelerator(
split_batches=A, dispatch_batches=A, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
a , a , a = get_training_setup(A )
for iteration, batch in enumerate(A ):
a , a = batch.values()
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A, A, A, A, A )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(A ):
step_model(A, A, A, A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(A ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a = ddp_input[torch.randperm(len(A ) )]
GradientState._reset_state()
def __magic_name__ ( A : List[Any]=False, A : Any=False ):
'''simple docstring'''
a = Accelerator(
split_batches=A, dispatch_batches=A, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
a , a , a , a , a , a , a = get_training_setup(A, A )
for iteration, batch in enumerate(A ):
a , a = batch.values()
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(A, A, A, A, A )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(A )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(A ):
step_model(A, A, A, A )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(A ))
if accelerator.num_processes > 1:
check_model_parameters(A, A, A, A )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def __magic_name__ ( ):
'''simple docstring'''
a = Accelerator()
a = RegressionDataset(length=80 )
a = DataLoader(A, batch_size=16 )
a = RegressionDataset(length=96 )
a = DataLoader(A, batch_size=16 )
a , a = accelerator.prepare(A, A )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(A )
if iteration < len(A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(A )
if batch_num < len(A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __magic_name__ ( ):
'''simple docstring'''
a = Accelerator()
a = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(A )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(A )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, ", F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
test_gradient_accumulation(A, A )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<", "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, ", F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
test_gradient_accumulation_with_opt_and_scheduler(A, A )
def __magic_name__ ( A : Optional[int] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
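# Hedged note (not part of the original sample): these checks are designed to run
# under the accelerate launcher so the MULTI_GPU / MULTI_CPU branches above execute,
# e.g.:
#
#   accelerate launch --num_processes 2 test_sync.py
#
# where `test_sync.py` is an assumed file name for this script.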
| 107 | 1 |
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n via trial division."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)  # i divides n, so it is a prime factor
    if n > 1:
        factors.append(n)  # whatever remains is prime
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
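# Illustrative checks (not part of the original sample):
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]  # a prime factors as itself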
| 161 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase__ = random.Random()
if is_torch_available():
import torch
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None ) -> Dict:
"""simple docstring"""
if rng is None:
lowerCAmelCase_ : int = global_rng
lowerCAmelCase_ : Dict = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , a_ : Dict , a_ : Dict=7 , a_ : int=4_00 , a_ : Union[str, Any]=20_00 , a_ : Any=1 , a_ : Optional[int]=0.0 , a_ : str=1_60_00 , a_ : Optional[int]=True , a_ : Dict=True , ):
lowerCAmelCase_ : Tuple = parent
lowerCAmelCase_ : Union[str, Any] = batch_size
lowerCAmelCase_ : Optional[int] = min_seq_length
lowerCAmelCase_ : List[Any] = max_seq_length
lowerCAmelCase_ : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase_ : Dict = feature_size
lowerCAmelCase_ : Tuple = padding_value
lowerCAmelCase_ : int = sampling_rate
lowerCAmelCase_ : str = return_attention_mask
lowerCAmelCase_ : Union[str, Any] = do_normalize
def lowerCamelCase ( self : Dict ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase ( self : List[Any] , a_ : List[Any]=False , a_ : Optional[int]=False ):
def _flatten(a_ : Optional[Any] ):
return list(itertools.chain(*a_ ) )
if equal_length:
lowerCAmelCase_ : Optional[Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase_ : Any = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase_ : List[Any] = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowerCamelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple = ASTFeatureExtractor
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = ASTFeatureExtractionTester(self )
def lowerCamelCase ( self : Tuple ):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase_ : Tuple = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase_ : str = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase_ : Optional[Any] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
lowerCAmelCase_ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
lowerCAmelCase_ : Tuple = feat_extract(a_ , padding=a_ , return_tensors="np" ).input_values
lowerCAmelCase_ : int = feat_extract(a_ , padding=a_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase_ : Tuple = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCAmelCase_ : Union[str, Any] = np.asarray(a_ )
lowerCAmelCase_ : str = feat_extract(a_ , return_tensors="np" ).input_values
lowerCAmelCase_ : List[Any] = feat_extract(a_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
@require_torch
def lowerCamelCase ( self : List[str] ):
import torch
lowerCAmelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : Tuple = np.random.rand(1_00 ).astype(np.float64 )
lowerCAmelCase_ : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase_ : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.float32 )
lowerCAmelCase_ : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def lowerCamelCase ( self : List[Any] , a_ : List[str] ):
from datasets import load_dataset
lowerCAmelCase_ : Union[str, Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
lowerCAmelCase_ : Optional[int] = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def lowerCamelCase ( self : str ):
# fmt: off
lowerCAmelCase_ : Tuple = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
lowerCAmelCase_ : Dict = self._load_datasamples(1 )
lowerCAmelCase_ : Union[str, Any] = ASTFeatureExtractor()
lowerCAmelCase_ : int = feature_extractor(a_ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 10_24, 1_28) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , a_ , atol=1e-4 ) )
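# Hedged usage sketch of the extractor exercised above; the output shape
# (1, 1024, 128) matches the assertion in the integration test, and `raw_speech`
# is a placeholder for a 1-D float waveform sampled at 16 kHz.
#
# from transformers import ASTFeatureExtractor
# fe = ASTFeatureExtractor()
# feats = fe(raw_speech, sampling_rate=16000, return_tensors="pt").input_values  # (1, 1024, 128)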
| 161 | 1 |
def SCREAMING_SNAKE_CASE__ ( word ) -> str:
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 124 |
from __future__ import annotations
from collections.abc import Callable
__UpperCAmelCase = list[list[float | int]]
def A__ ( __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = [[0 for _ in range(size + 1 )] for _ in range(__lowerCamelCase )]
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
for row in range(__lowerCamelCase ):
for col in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = matrix[row][col]
SCREAMING_SNAKE_CASE_ = vector[row][0]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE_ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCamelCase, __lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]  # swap current row with pivot row
for rowa in range(row + 1, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE_ = 0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, __lowerCamelCase ):
for row in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = augmented[row][col] / augmented[col][col]
for cola in range(__lowerCamelCase, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 10 )] for row in range(__lowerCamelCase )
]
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = [[0 for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
SCREAMING_SNAKE_CASE_ = [[0] for _ in range(__lowerCamelCase )]
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
for x_val, y_val in enumerate(__lowerCamelCase ):
for col in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = (x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE_ = y_val
SCREAMING_SNAKE_CASE_ = solve(__lowerCamelCase, __lowerCamelCase )
def interpolated_func(__lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__lowerCamelCase ) )
return interpolated_func
def A__ ( __lowerCamelCase ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def A__ ( __lowerCamelCase = question_function, __lowerCamelCase = 10 ):
SCREAMING_SNAKE_CASE_ = [func(__lowerCamelCase ) for x_val in range(1, order + 1 )]
SCREAMING_SNAKE_CASE_ = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
for poly in polynomials:
SCREAMING_SNAKE_CASE_ = 1
while func(__lowerCamelCase ) == poly(__lowerCamelCase ):
x_val += 1
ret += poly(__lowerCamelCase )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299 | 0 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        # swap so the larger element moves toward the end
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
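# Illustrative check (not part of the original sample): slowsort sorts in place.
data = [5, 2, 4, 6, 1, 3]
slowsort(data)
assert data == [1, 2, 3, 4, 5, 6]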
| 353 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(1_0_0, 0.25) = }""")
print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
| 0 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __A( a ):
snake_case_ = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> Any:
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_snake_case )
return config
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
self.check_over_configs(thresholding=_snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a = len(_snake_case )
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for t in reversed(range(_snake_case ) ):
# 1. predict noise residual
__a = model(_snake_case , _snake_case )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a = pred_prev_sample
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a = scheduler_class(**_snake_case )
__a = len(_snake_case )
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for t in reversed(range(_snake_case ) ):
# 1. predict noise residual
__a = model(_snake_case , _snake_case )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a = pred_prev_sample
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_snake_case )
__a = scheduler.timesteps
for i, timestep in enumerate(_snake_case ):
if i == len(_snake_case ) - 1:
__a = -1
else:
__a = timesteps[i + 1]
__a = scheduler.previous_timestep(_snake_case )
__a = prev_t.item()
self.assertEqual(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a = [100, 87, 50, 51, 0]
with self.assertRaises(_snake_case , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a = [100, 87, 50, 1, 0]
__a = len(_snake_case )
with self.assertRaises(_snake_case , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_snake_case , timesteps=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_snake_case , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=_snake_case )
| 6 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "mvp"
UpperCAmelCase__ : Tuple = ["past_key_values"]
UpperCAmelCase__ : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , A_=50267 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , A_=False , A_=100 , A_=800 , **A_ , ) -> Union[str, Any]:
__UpperCamelCase =vocab_size
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =d_model
__UpperCamelCase =encoder_ffn_dim
__UpperCamelCase =encoder_layers
__UpperCamelCase =encoder_attention_heads
__UpperCamelCase =decoder_ffn_dim
__UpperCamelCase =decoder_layers
__UpperCamelCase =decoder_attention_heads
__UpperCamelCase =dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =activation_dropout
__UpperCamelCase =activation_function
__UpperCamelCase =init_std
__UpperCamelCase =encoder_layerdrop
__UpperCamelCase =decoder_layerdrop
__UpperCamelCase =classifier_dropout
__UpperCamelCase =use_cache
__UpperCamelCase =encoder_layers
__UpperCamelCase =scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCamelCase =use_prompt
__UpperCamelCase =prompt_length
__UpperCamelCase =prompt_mid_dim
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , A_ ):
__UpperCamelCase =self.bos_token_id
warnings.warn(
f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
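# Hedged sketch (assumes the class above is transformers' MvpConfig): the prompt
# fields are plain config attributes alongside the usual encoder/decoder sizes.
from transformers import MvpConfig

mvp_cfg = MvpConfig(use_prompt=True, prompt_length=100, prompt_mid_dim=800)
assert mvp_cfg.prompt_length == 100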
| 62 | 0 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = np.argmax(__lowerCAmelCase , axis=1 )
return np.sum(outputs == labels )
def _A ( A__ ):
"""simple docstring"""
with open(__lowerCAmelCase , encoding='''utf_8''' ) as f:
__lowercase = csv.reader(__lowerCAmelCase )
__lowercase = []
next(__lowerCAmelCase ) # skip the first line
for line in tqdm(__lowerCAmelCase ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _A ( A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = []
for dataset in encoded_datasets:
__lowercase = len(__lowerCAmelCase )
__lowercase = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
__lowercase = np.zeros((n_batch, 2) , dtype=np.int64 )
__lowercase = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
__lowercase = np.zeros((n_batch,) , dtype=np.int64 )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(__lowerCAmelCase ):
__lowercase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__lowercase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__lowercase = with_conta
__lowercase = with_conta
__lowercase = len(__lowerCAmelCase ) - 1
__lowercase = len(__lowerCAmelCase ) - 1
__lowercase = with_conta
__lowercase = with_conta
__lowercase = mc_label
__lowercase = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(__lowerCAmelCase ) for t in all_inputs ) )
return tensor_datasets
def _A ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=__lowerCAmelCase , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=__lowerCAmelCase , default='''''' )
parser.add_argument('''--eval_dataset''' , type=__lowerCAmelCase , default='''''' )
parser.add_argument('''--seed''' , type=__lowerCAmelCase , default=42 )
parser.add_argument('''--num_train_epochs''' , type=__lowerCAmelCase , default=3 )
parser.add_argument('''--train_batch_size''' , type=__lowerCAmelCase , default=8 )
parser.add_argument('''--eval_batch_size''' , type=__lowerCAmelCase , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__lowerCAmelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=__lowerCAmelCase , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=__lowerCAmelCase , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__lowerCAmelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=__lowerCAmelCase , default=6.25e-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=__lowerCAmelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=__lowerCAmelCase , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=__lowerCAmelCase , default=0.0_1 )
parser.add_argument('''--lm_coef''' , type=__lowerCAmelCase , default=0.9 )
parser.add_argument('''--n_valid''' , type=__lowerCAmelCase , default=374 )
parser.add_argument('''--server_ip''' , type=__lowerCAmelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=__lowerCAmelCase , default='''''' , help='''Can be used for distant debugging.''' )
__lowercase = parser.parse_args()
print(__lowerCAmelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__lowercase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__lowercase = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(__lowerCAmelCase , __lowerCAmelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__lowercase = ['''_start_''', '''_delimiter_''', '''_classify_''']
__lowercase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__lowerCAmelCase )
__lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
__lowercase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__lowerCAmelCase ) )
model.to(__lowerCAmelCase )
# Load and encode the datasets
def tokenize_and_encode(A__ ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__lowerCAmelCase ) )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return obj
return [tokenize_and_encode(__lowerCAmelCase ) for o in obj]
logger.info('''Encoding dataset...''' )
__lowercase = load_rocstories_dataset(args.train_dataset )
__lowercase = load_rocstories_dataset(args.eval_dataset )
__lowercase = (train_dataset, eval_dataset)
__lowercase = tokenize_and_encode(__lowerCAmelCase )
# Compute the max input length for the Transformer
__lowercase = model.config.n_positions // 2 - 2
__lowercase = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__lowercase = min(__lowerCAmelCase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__lowercase = pre_process_datasets(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase )
__lowercase , __lowercase = tensor_datasets[0], tensor_datasets[1]
__lowercase = TensorDataset(*__lowerCAmelCase )
__lowercase = RandomSampler(__lowerCAmelCase )
__lowercase = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.train_batch_size )
__lowercase = TensorDataset(*__lowerCAmelCase )
__lowercase = SequentialSampler(__lowerCAmelCase )
__lowercase = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__lowercase = args.max_steps
__lowercase = args.max_steps // (len(__lowerCAmelCase ) // args.gradient_accumulation_steps) + 1
else:
__lowercase = len(__lowerCAmelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
__lowercase = list(model.named_parameters() )
__lowercase = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__lowercase = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
__lowercase = AdamW(__lowerCAmelCase , lr=args.learning_rate , eps=args.adam_epsilon )
__lowercase = get_linear_schedule_with_warmup(
__lowerCAmelCase , num_warmup_steps=args.warmup_steps , num_training_steps=__lowerCAmelCase )
if args.do_train:
__lowercase , __lowercase , __lowercase = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
__lowercase = 0
__lowercase = 0
__lowercase = tqdm(__lowerCAmelCase , desc='''Training''' )
for step, batch in enumerate(__lowerCAmelCase ):
__lowercase = tuple(t.to(__lowerCAmelCase ) for t in batch )
__lowercase , __lowercase , __lowercase , __lowercase = batch
__lowercase = model(__lowerCAmelCase , mc_token_ids=__lowerCAmelCase , lm_labels=__lowerCAmelCase , mc_labels=__lowerCAmelCase )
__lowercase = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__lowercase = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__lowercase = '''Training loss: {:.2e} lr: {:.2e}'''.format(__lowerCAmelCase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__lowercase = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__lowercase = os.path.join(args.output_dir , __lowerCAmelCase )
__lowercase = os.path.join(args.output_dir , __lowerCAmelCase )
torch.save(model_to_save.state_dict() , __lowerCAmelCase )
model_to_save.config.to_json_file(__lowerCAmelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__lowercase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__lowercase = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__lowerCAmelCase )
if args.do_eval:
model.eval()
__lowercase , __lowercase = 0, 0
__lowercase , __lowercase = 0, 0
for batch in tqdm(__lowerCAmelCase , desc='''Evaluating''' ):
__lowercase = tuple(t.to(__lowerCAmelCase ) for t in batch )
__lowercase , __lowercase , __lowercase , __lowercase = batch
with torch.no_grad():
__lowercase , __lowercase , __lowercase , __lowercase = model(
__lowerCAmelCase , mc_token_ids=__lowerCAmelCase , lm_labels=__lowerCAmelCase , mc_labels=__lowerCAmelCase )
__lowercase = mc_logits.detach().cpu().numpy()
__lowercase = mc_labels.to('''cpu''' ).numpy()
__lowercase = accuracy(__lowerCAmelCase , __lowerCAmelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__lowercase = eval_loss / nb_eval_steps
__lowercase = eval_accuracy / nb_eval_examples
__lowercase = tr_loss / nb_tr_steps if args.do_train else None
__lowercase = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__lowercase = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(__lowerCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , __lowerCAmelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
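# Hedged CLI sketch for the script above; `run_openai_gpt.py` is an assumed file
# name, and the CSV paths are placeholders for the ROCStories cloze files the
# loader expects, not files shipped with the script.
#
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset cloze_test_val__spring2016.csv \
#     --eval_dataset cloze_test_test__spring2016.csv \
#     --output_dir ./gpt_rocstories \
#     --train_batch_size 16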
| 370 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def _A ( A__ ):
"""simple docstring"""
for i in range(0 , A__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def _A ( A__ ):
"""simple docstring"""
for i in range(A__ , 0 , -1 ):
for _ in range(A__ , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def _A ( A__ ):
"""simple docstring"""
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(A__ ) # upper half
reverse_floyd(A__ ) # lower half
if __name__ == "__main__":
print(R'''| /\ | |- | |- |--| |\ /| |-''')
print(R'''|/ \| |- |_ |_ |__| | \/ | |_''')
lowerCAmelCase__ = 1
while K:
lowerCAmelCase__ = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
lowerCAmelCase__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 52 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case :Optional[int] = logging.get_logger(__name__)
__snake_case :Optional[Any] = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
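# Quick check of the attribute_map plumbing above: the two properties simply
# mirror the underlying fields, e.g. TableTransformerConfig(d_model=128).hidden_size == 128.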
| 49 | from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``; record every node
    whose subtree has even size (the edge above it can be cut)."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the DFS from the root (node 1)."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
print(len(cuts) - 1)
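# For the sample tree above, the even-sized subtrees hang below nodes 3 and 6
# (plus the root itself), so the script prints 2: the maximum number of edges
# removable while leaving only even-sized components.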
| 18 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Map a fairseq MusicGen weight name onto the corresponding HF module name."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
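# e.g. rename_keys("transformer.layers.0.linear1.weight")
#      -> "model.decoder.layers.0.fc1.weight"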
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq state dict to HF conventions, split the fused qkv
    projection, and carve out the encoder-decoder projection weights.
    (Destination keys below were lost in the dump; restored following the
    upstream MusicGen conversion script.)"""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 242 |
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Compute the most likely sequence of hidden states (the Viterbi path)
    for the observations under the supplied HMM parameters."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
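# Usage sketch with the classic "Healthy/Fever" HMM (hypothetical parameter
# dicts; the exact path depends on the probabilities supplied):
# viterbi(["normal", "cold", "dizzy"], ["Healthy", "Fever"],
#         initial_p, transition_p, emission_p) -> e.g. ["Healthy", "Healthy", "Fever"]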
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
| 242 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
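# With this hook in place, `pytest -m unit` selects everything that is not
# explicitly marked as an integration test.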
def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 13 |
from collections.abc import Callable
class Heap:
    """A generic heap; pass a key function to order items (default: identity)."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns the parent index of the given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns the left-child index of the given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns the right-child index of the given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index of a valid parent: the largest of i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the freed slot and keep the index map in sync.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        """Returns the top [item, score] pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        """Returns and removes the top [item, score] pair from the heap if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass  # doctests elided in this dump
if __name__ == "__main__":
import doctest
doctest.testmod()
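# Example (the default key gives a max-heap):
# h = Heap(); h.insert_item(5, 34); h.insert_item(6, 31); h.insert_item(7, 37)
# h.get_top() -> [7, 37]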
| 13 | 1 |
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
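# Quick checks:
# binary_and(25, 32) == "0b000000"   (11001 & 100000 share no set bits)
# binary_and(37, 50) == "0b100000"   (100101 & 110010 share only bit 5)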
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    """Close or warn on stale open issues in huggingface/transformers."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 87 | 0 |
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: the smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(F'{solution() = }')
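# Sanity check: solution(10) == 2520, the smallest number divisible by 1..10
# quoted in the Project Euler problem statement.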
| 157 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
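# e.g. factorial(5) == 120; thanks to lru_cache, repeated calls reuse the
# memoized intermediate results instead of recursing again.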
if __name__ == "__main__":
import doctest
doctest.testmod()
| 148 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the list of (timm key, HF key) pairs for the ViT-hybrid weights."""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    # Destination key names below were lost in the dump; restored following the
    # upstream ViT-hybrid conversion script.
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 246 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 246 | 1 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Counts fillings of a row of the given length with blocks of length at
    least three, any two blocks separated by at least one empty square."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
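# solution(7) == 17, matching the worked example in the Project Euler
# problem statement (rows of length seven).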
if __name__ == "__main__":
print(F'''{solution() = }''')
| 315 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 315 | 1 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    # HF module paths below were lost in the dump; restored following the
    # upstream SpeechT5 HiFi-GAN conversion script.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 356 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 180 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 160 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_b + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()] | 160 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
'''simple docstring'''
__UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Dict = input("""Enter message: """ )
__snake_case : Optional[int] = input("""Enter key [alphanumeric]: """ )
__snake_case : Tuple = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
__snake_case : Any = """encrypt"""
__snake_case : Optional[Any] = encrypt_message(_lowerCamelCase , _lowerCamelCase )
elif mode.lower().startswith("""d""" ):
__snake_case : Optional[int] = """decrypt"""
__snake_case : Any = decrypt_message(_lowerCamelCase , _lowerCamelCase )
print(F'''\n{mode.title()}ed message:''' )
print(_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """encrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """decrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : str = []
__snake_case : Dict = 0
__snake_case : Optional[int] = key.upper()
for symbol in message:
__snake_case : Any = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_lowerCamelCase ):
__snake_case : Tuple = 0
else:
translated.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
main()
| 13 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(self, vocab_size=50_000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
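# Added usage sketch (assuming the transformers-style names restored above):
# the ONNX config publishes dynamic batch/sequence axes for export tooling.
if __name__ == "__main__":
    config = RoFormerConfig(vocab_size=1_000, hidden_size=128)
    onnx_config = RoFormerOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict with {0: "batch", 1: "sequence"} axes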
| 46 |
from ..utils import DummyObject, requires_backends
class OnnxPlaceholder(metaclass=DummyObject):  # original class name lost to mangling; structure restored
    """simple docstring"""

    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):  # classmethod names follow the standard dummy-object template
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
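# Added behavior note: with the onnx backend absent, instantiating the
# placeholder fails fast with a readable error instead of a NameError, e.g.
#
#   OnnxPlaceholder()  # -> ImportError: ... requires the onnx library ...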
| 90 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 177 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start: Sequence[int], end: Sequence[int], dims: Sequence[int], start_edges: Optional[Sequence[bool]] = None, end_edges: Optional[Sequence[bool]] = None) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    """simple docstring"""

    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(representative_fn, args, min_chunk_size)
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
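# Added usage sketch for chunk_layer above (behavior as in the OpenFold
# utilities this file mirrors): process a large batch in chunks of 4 to cap
# peak memory, then check the result matches the unchunked call.
if __name__ == "__main__":
    linear = torch.nn.Linear(8, 8)
    data = torch.randn(16, 8)
    chunked = chunk_layer(lambda x: linear(x), {"x": data}, chunk_size=4, no_batch_dims=1)
    assert torch.allclose(chunked, linear(data), atol=1e-6)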
| 177 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293 |
"""simple docstring"""
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
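# Added worked example: an outlay of -100 followed by inflows of 60 and 70,
# discounted at 10%, gives -100 + 60/1.1 + 70/1.21, which rounds to 12.4.
if __name__ == "__main__":
    print(present_value(0.10, [-100, 60, 70]))  # 12.4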
| 293 | 1 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v) -> bool:
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(*, aliases: Union[str, List[str]] = None, help: str = None, default: Any = dataclasses.MISSING, default_factory: Callable[[], Any] = dataclasses.MISSING, metadata: dict = None, **kwargs) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    """simple docstring"""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
@staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        """simple docstring"""
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default")

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'.")
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        """simple docstring"""
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)")
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`.") from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None) -> Tuple[DataClass, ...]:
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs) | 58 |
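# Added usage sketch for the HfArgumentParser defined above (behavior per the
# released transformers implementation this copy mirrors): a dataclass maps
# straight to CLI flags, including the auto-generated --no_* bool complement.
if __name__ == "__main__":
    @dataclasses.dataclass
    class TrainArgs:
        learning_rate: float = 3e-4
        use_fp16: bool = True

    parser = HfArgumentParser(TrainArgs)
    (train_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-3", "--no_use_fp16"])
    print(train_args)  # TrainArgs(learning_rate=0.001, use_fp16=False)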
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
def _SCREAMING_SNAKE_CASE ( self: int) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = VivitImageProcessingTester(self)
@property
    def image_processor_dict(self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        ) | 58 | 1 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """simple docstring"""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _A (__a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _A (__a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE_ : List[str] = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : int = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Dict = ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _A (__a , __a , __a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE_ : Optional[Any] = ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def _A (__a , __a , __a ) -> Dict:
"""simple docstring"""
if issubclass(__a , __a ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = parquet_path
elif issubclass(__a , __a ):
SCREAMING_SNAKE_CASE_ : int = [parquet_path]
SCREAMING_SNAKE_CASE_ : Dict = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE_ : Optional[Any] = ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """simple docstring"""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _A (__a , __a , __a ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : int = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _A (__a , __a , __a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE_ : List[Any] = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Dict = ParquetDatasetReader({'''train''': parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _A (__a , __a , __a ) -> Dict:
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Any = {split: parquet_path}
else:
SCREAMING_SNAKE_CASE_ : Dict = '''train'''
SCREAMING_SNAKE_CASE_ : List[str] = {'''train''': parquet_path, '''test''': parquet_path}
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE_ : Tuple = ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _A (__a , __a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ParquetDatasetWriter(__a , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
SCREAMING_SNAKE_CASE_ : Any = pq.ParquetFile(tmp_path / '''foo.parquet''' )
SCREAMING_SNAKE_CASE_ : Tuple = pf.read()
assert dataset.data.table == output_table
def _A (__a , __a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = str(shared_datadir / '''test_image_rgb.jpg''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = {'''image''': [image_path]}
SCREAMING_SNAKE_CASE_ : Dict = Features({'''image''': Image()} )
SCREAMING_SNAKE_CASE_ : Optional[int] = Dataset.from_dict(__a , features=__a )
SCREAMING_SNAKE_CASE_ : Any = ParquetDatasetWriter(__a , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
SCREAMING_SNAKE_CASE_ : Any = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
SCREAMING_SNAKE_CASE_ : str = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def _A (__a , __a ) -> str:
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
| 91 |
"""simple docstring"""
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """simple docstring"""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
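# Added note: the shuffle above swaps two uniformly random positions per pass,
# which is not the classic Fisher-Yates and is not exactly uniform over
# permutations. A sketch of the unbiased textbook variant:
def fisher_yates_shuffle_textbook(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # only choose from the not-yet-fixed prefix
        data[i], data[j] = data[j], data[i]
    return data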
| 91 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 16 | 0 |
def climbing_stairs(number_of_steps: int) -> int:
    """simple docstring"""
    assert isinstance(number_of_steps, int) and number_of_steps > 0, (
        f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    )
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
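# Added quick check: the number of ways to climb n steps taking 1 or 2 at a
# time follows the Fibonacci sequence.
if __name__ == "__main__":
    print([climbing_stairs(n) for n in range(1, 5)])  # [1, 2, 3, 5]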
| 48 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Union[str, Any] , ) -> None:
super().__init__(**lowercase_ )
lowercase__ : Tuple = size if size is not None else {"shortest_edge": 2_24}
lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowercase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" )
lowercase__ : Dict = do_resize
lowercase__ : List[Any] = size
lowercase__ : int = resample
lowercase__ : Union[str, Any] = do_center_crop
lowercase__ : Optional[int] = crop_size
lowercase__ : List[str] = do_rescale
lowercase__ : int = rescale_factor
lowercase__ : List[Any] = do_normalize
lowercase__ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase__ : str = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase__ : Dict = do_convert_rgb
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray:
lowercase__ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase__ : Dict = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray:
lowercase__ : Optional[Any] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Any:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image:
lowercase__ : int = do_resize if do_resize is not None else self.do_resize
lowercase__ : Dict = size if size is not None else self.size
lowercase__ : List[Any] = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
lowercase__ : Dict = resample if resample is not None else self.resample
lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size
lowercase__ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : int = image_mean if image_mean is not None else self.image_mean
lowercase__ : List[str] = image_std if image_std is not None else self.image_std
lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ : Dict = [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
lowercase__ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowercase__ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
lowercase__ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
lowercase__ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowercase__ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowercase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowercase__ : List[str] = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
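# Hypothetical usage sketch (class and method names are assumed for illustration;
# they are obfuscated in the row above):
#     processor = CLIPImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
#     batch = processor.preprocess(images, return_tensors="pt")
#     pixel_values = batch["pixel_values"]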
| 87 | 0 |
"""simple docstring"""
def lowercase_ ( __UpperCAmelCase ) -> str:
    if isinstance(__UpperCAmelCase , float ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if isinstance(__UpperCAmelCase , str ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
if num == 0:
return "0b0"
lowerCAmelCase__ : Optional[Any] = False
if num < 0:
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Optional[Any] = -num
lowerCAmelCase__ : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(__UpperCAmelCase ) for e in binary )
return "0b" + "".join(str(__UpperCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 212 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _lowerCamelCase :
def __init__( self : str , UpperCamelCase : int , UpperCamelCase : str=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Dict=7 , UpperCamelCase : List[Any]=9 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=32 , UpperCamelCase : str=5 , UpperCamelCase : int=4 , UpperCamelCase : Optional[Any]=37 , UpperCamelCase : Tuple=8 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=0.002 , UpperCamelCase : List[Any]=1 , UpperCamelCase : Any=0 , UpperCamelCase : Optional[Any]=0 , UpperCamelCase : Dict=None , UpperCamelCase : str=None , ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = parent
lowerCAmelCase__ : Union[str, Any] = batch_size
lowerCAmelCase__ : List[str] = encoder_seq_length
lowerCAmelCase__ : Any = decoder_seq_length
# For common tests
lowerCAmelCase__ : Union[str, Any] = self.decoder_seq_length
lowerCAmelCase__ : List[Any] = is_training
lowerCAmelCase__ : Optional[Any] = use_attention_mask
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Any = vocab_size
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : int = d_ff
lowerCAmelCase__ : int = relative_attention_num_buckets
lowerCAmelCase__ : Union[str, Any] = dropout_rate
lowerCAmelCase__ : str = initializer_factor
lowerCAmelCase__ : Tuple = eos_token_id
lowerCAmelCase__ : List[str] = pad_token_id
lowerCAmelCase__ : str = decoder_start_token_id
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = decoder_layers
def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
return TaConfig.from_pretrained("""google/umt5-base""" )
def _lowerCAmelCase ( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : int=None , UpperCamelCase : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
if attention_mask is None:
lowerCAmelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase__ : List[str] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase__ : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCamelCase )
if decoder_head_mask is None:
lowerCAmelCase__ : Optional[int] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
if cross_attn_head_mask is None:
lowerCAmelCase__ : Optional[int] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
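        # Note: the default masks built above are all-ones tensors, i.e. no attention
        # heads are masked out unless the caller passes explicit masks.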
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase__ : int = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ : Tuple = self.get_config()
lowerCAmelCase__ : Dict = config.num_attention_heads
lowerCAmelCase__ : Dict = self.prepare_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return config, input_dict
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Any , ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = UMTaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : Optional[int] = model(
input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase , attention_mask=UpperCamelCase , decoder_attention_mask=UpperCamelCase , )
lowerCAmelCase__ : Optional[int] = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase )
lowerCAmelCase__ : List[Any] = result.last_hidden_state
lowerCAmelCase__ : Any = result.past_key_values
lowerCAmelCase__ : str = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = UMTaModel(config=UpperCamelCase ).get_decoder().to(UpperCamelCase ).eval()
# first forward pass
lowerCAmelCase__ : Optional[int] = model(UpperCamelCase , use_cache=UpperCamelCase )
lowerCAmelCase__ : Any = model(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = model(UpperCamelCase , use_cache=UpperCamelCase )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase__ : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCAmelCase__ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : List[str] = model(UpperCamelCase )["""last_hidden_state"""]
lowerCAmelCase__ : Any = model(UpperCamelCase , past_key_values=UpperCamelCase )["""last_hidden_state"""]
# select random slice
lowerCAmelCase__ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : List[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase__ : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : int , ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = UMTaModel(config=UpperCamelCase ).to(UpperCamelCase ).half().eval()
lowerCAmelCase__ : Dict = model(**UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(UpperCamelCase ).any().item() )
@require_torch
class _lowerCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
_lowerCamelCase :List[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_lowerCamelCase :List[str] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_lowerCamelCase :Optional[Any] = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_lowerCamelCase :Dict = True
_lowerCamelCase :Optional[Any] = False
_lowerCamelCase :List[str] = False
_lowerCamelCase :Dict = True
_lowerCamelCase :str = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_lowerCamelCase :Optional[int] = [0.8, 0.9]
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Any = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[str] = UMTaModel(config_and_inputs[0] ).to(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCamelCase )
def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = config_and_inputs[0]
lowerCAmelCase__ : int = UMTaForConditionalGeneration(UpperCamelCase ).eval()
model.to(UpperCamelCase )
lowerCAmelCase__ : List[Any] = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ),
}
for attn_name, (name, mask) in zip(UpperCamelCase , head_masking.items() ):
lowerCAmelCase__ : Union[str, Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCAmelCase__ : Tuple = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=UpperCamelCase , return_dict_in_generate=UpperCamelCase , **UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCAmelCase__ : str = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Dict = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=UpperCamelCase ).to(UpperCamelCase )
lowerCAmelCase__ : Dict = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=UpperCamelCase , legacy=UpperCamelCase )
lowerCAmelCase__ : int = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
lowerCAmelCase__ : Union[str, Any] = tokenizer(UpperCamelCase , return_tensors="""pt""" , padding=UpperCamelCase ).input_ids
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = model.generate(input_ids.to(UpperCamelCase ) )
lowerCAmelCase__ : int = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
lowerCAmelCase__ : Any = tokenizer.batch_decode(UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
| 212 | 1 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
a_ = 'examples/'
a_ = {
'examples': (re.compile(r'^check_min_version\(\"[^\"]+\"\)\s*$', re.MULTILINE), 'check_min_version(\"VERSION\")\n'),
'init': (re.compile(r'^__version__\s+=\s+\"([^\"]+)\"\s*$', re.MULTILINE), '__version__ = \"VERSION\"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*\"[^\"]+\",', re.MULTILINE), r'\1version=\"VERSION\",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*\"[^\"]+\"$', re.MULTILINE), 'release = \"VERSION\"\n'),
}
a_ = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
a_ = 'README.md'
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowercase : Union[str, Any] = f.read()
__lowercase : Dict = REPLACE_PATTERNS[pattern]
__lowercase : Tuple = replace.replace('''VERSION''' , __SCREAMING_SNAKE_CASE )
__lowercase : Any = re_pattern.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( __UpperCamelCase ):
for folder, directories, fnames in os.walk(__SCREAMING_SNAKE_CASE ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , pattern='''examples''' )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not patch:
update_version_in_examples(__SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( ):
__lowercase : Optional[Any] = "🤗 Transformers currently provides the following architectures"
__lowercase : List[str] = "1. Want to contribute a new model?"
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowercase : int = f.readlines()
# Find the start of the list.
__lowercase : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__lowercase : Dict = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__lowercase : int = lines[index].replace(
'''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
index += 1
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( ):
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__lowercase : int = f.read()
__lowercase : List[str] = REPLACE_PATTERNS["init"][0].search(__SCREAMING_SNAKE_CASE ).groups()[0]
return packaging.version.parse(__SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( __UpperCamelCase=False ):
__lowercase : Dict = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__lowercase : Optional[int] = default_version.base_version
elif patch:
__lowercase : List[str] = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__lowercase : Tuple = f"""{default_version.major}.{default_version.minor + 1}.0"""
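    # Worked example of the branches above (version numbers are illustrative):
    # a dev version "4.31.0.dev0" releases as "4.31.0"; a patch on "4.31.0"
    # yields "4.31.1"; otherwise the next minor release is "4.32.0".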
# Now let's ask nicely if that's the right one.
__lowercase : Tuple = input(f"""Which version are you releasing? [{default_version}]""" )
if len(__SCREAMING_SNAKE_CASE ) == 0:
__lowercase : Union[str, Any] = default_version
print(f"""Updating version to {version}.""" )
global_version_update(__SCREAMING_SNAKE_CASE , patch=__SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( ):
__lowercase : Tuple = get_version()
__lowercase : str = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__lowercase : Tuple = current_version.base_version
# Check with the user we got that right.
__lowercase : List[str] = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(__SCREAMING_SNAKE_CASE ) == 0:
__lowercase : Optional[Any] = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(__SCREAMING_SNAKE_CASE )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
a_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 249 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def a__ ( __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def a__ ( __SCREAMING_SNAKE_CASE ) -> str:
class snake_case :
def __init__( self : int , UpperCamelCase__ : Optional[int])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: str = metric_id
class snake_case :
SCREAMING_SNAKE_CASE_ : List[Any] = [MetricMock(__snake_case ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if "tmp_path" in args:
__lowerCAmelCase: Tuple = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(__SCREAMING_SNAKE_CASE , match="https://huggingface.co/docs/evaluate" ):
func(*__SCREAMING_SNAKE_CASE )
| 217 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__magic_name__ = 500000
__magic_name__ = os.path.split(__file__)
__magic_name__ = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def _lowerCAmelCase ( A__: Tuple , **A__: Any ):
'''simple docstring'''
UpperCAmelCase = dataset.map(**_UpperCAmelCase )
@get_duration
def _lowerCAmelCase ( A__: str , **A__: str ):
'''simple docstring'''
UpperCAmelCase = dataset.filter(**_UpperCAmelCase )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
UpperCAmelCase = generate_example_dataset(
os.path.join(_UpperCAmelCase , '''dataset.arrow''' ) , _UpperCAmelCase , num_examples=_UpperCAmelCase )
UpperCAmelCase = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_UpperCAmelCase )
def tokenize(A__: Any ):
return tokenizer(examples['''text'''] )
UpperCAmelCase = map(_UpperCAmelCase )
UpperCAmelCase = map(_UpperCAmelCase , batched=_UpperCAmelCase )
UpperCAmelCase = map(_UpperCAmelCase , function=lambda A__ : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='''numpy''' ):
UpperCAmelCase = map(_UpperCAmelCase , function=lambda A__ : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='''pandas''' ):
UpperCAmelCase = map(_UpperCAmelCase , function=lambda A__ : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
UpperCAmelCase = map(_UpperCAmelCase , function=lambda A__ : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
UpperCAmelCase = map(_UpperCAmelCase , function=lambda A__ : None , batched=_UpperCAmelCase )
UpperCAmelCase = map(_UpperCAmelCase , function=_UpperCAmelCase , batched=_UpperCAmelCase )
UpperCAmelCase = filter(_UpperCAmelCase )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(_UpperCAmelCase , '''wb''' ) as f:
f.write(json.dumps(_UpperCAmelCase ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 365 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
__magic_name__ = "Usage of script: script_name <size_of_canvas:int>"
__magic_name__ = [0] * 100 + [1] * 10
random.shuffle(choice)
def _lowerCAmelCase ( A__: int ):
'''simple docstring'''
UpperCAmelCase = [[False for i in range(A__ )] for j in range(A__ )]
return canvas
def _lowerCAmelCase ( A__: list[list[bool]] ):
'''simple docstring'''
for i, row in enumerate(A__ ):
for j, _ in enumerate(A__ ):
UpperCAmelCase = bool(random.getrandbits(1 ) )
def _lowerCAmelCase ( A__: list[list[bool]] ):
'''simple docstring'''
UpperCAmelCase = np.array(A__ )
UpperCAmelCase = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(A__ ):
for c, pt in enumerate(A__ ):
UpperCAmelCase = __judge_point(
A__ , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
UpperCAmelCase = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
UpperCAmelCase = current_canvas.tolist()
return return_canvas
def _lowerCAmelCase ( A__: bool , A__: list[list[bool]] ):
'''simple docstring'''
UpperCAmelCase = 0
UpperCAmelCase = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
UpperCAmelCase = pt
if pt:
if alive < 2:
UpperCAmelCase = False
elif alive == 2 or alive == 3:
UpperCAmelCase = True
elif alive > 3:
UpperCAmelCase = False
else:
if alive == 3:
UpperCAmelCase = True
return state
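# The rules above are Conway's standard B3/S23 rule set: a live cell survives
# with exactly 2 or 3 live neighbours and dies otherwise; a dead cell becomes
# alive only with exactly 3 live neighbours.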
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
__magic_name__ = int(sys.argv[1])
# main working structure of this module.
__magic_name__ = create_canvas(canvas_size)
seed(c)
__magic_name__ , __magic_name__ = plt.subplots()
fig.show()
__magic_name__ = ListedColormap(["w", "k"])
try:
while True:
__magic_name__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 152 | 0 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 164 |
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( lowercase__ ):
    return "".join(sorted(lowercase__ ) )
def anagram( lowercase__ ):
    return word_by_signature[signature(lowercase__ )]
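# Example: signature("listen") == signature("silent") == "eilnst", so both
# words land in the same bucket and are reported as anagrams of each other.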
__A = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
__A = sorted({word.strip().lower() for word in data.splitlines()})
__A = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__A = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 164 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__A = logging.get_logger(__name__)
__A = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
__A = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
__A = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class lowercase ( snake_case__):
"""simple docstring"""
a__ : Any = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Tuple = ["input_ids", "attention_mask"]
a__ : Optional[Any] = BartTokenizer
def __init__( self : Tuple , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Dict="replace" , __UpperCAmelCase : int="<s>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : List[str]="</s>" , __UpperCAmelCase : List[Any]="<s>" , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : str="<pad>" , __UpperCAmelCase : str="<mask>" , __UpperCAmelCase : str=False , __UpperCAmelCase : Dict=True , **__UpperCAmelCase : List[str] , ) -> Optional[int]:
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase , **__UpperCAmelCase , )
UpperCAmelCase_= json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __UpperCAmelCase ) != add_prefix_space:
UpperCAmelCase_= getattr(__UpperCAmelCase , pre_tok_state.pop("""type""" ) )
UpperCAmelCase_= add_prefix_space
UpperCAmelCase_= pre_tok_class(**__UpperCAmelCase )
UpperCAmelCase_= add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase_= """post_processor"""
UpperCAmelCase_= getattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
if tokenizer_component_instance:
UpperCAmelCase_= json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_= tuple(state["""sep"""] )
if "cls" in state:
UpperCAmelCase_= tuple(state["""cls"""] )
UpperCAmelCase_= False
if state.get("""add_prefix_space""" , __UpperCAmelCase ) != add_prefix_space:
UpperCAmelCase_= add_prefix_space
UpperCAmelCase_= True
if state.get("""trim_offsets""" , __UpperCAmelCase ) != trim_offsets:
UpperCAmelCase_= trim_offsets
UpperCAmelCase_= True
if changes_to_apply:
UpperCAmelCase_= getattr(__UpperCAmelCase , state.pop("""type""" ) )
UpperCAmelCase_= component_class(**__UpperCAmelCase )
setattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
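            # After this block, the backend tokenizer's post_processor is consistent
            # with the add_prefix_space / trim_offsets arguments passed to __init__.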
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] ) -> Tuple:
UpperCAmelCase_= AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else value
UpperCAmelCase_= value
def _SCREAMING_SNAKE_CASE ( self : Tuple , *__UpperCAmelCase : Any , **__UpperCAmelCase : Tuple ) -> BatchEncoding:
UpperCAmelCase_= kwargs.get("""is_split_into_words""" , __UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Any , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : str ) -> BatchEncoding:
UpperCAmelCase_= kwargs.get("""is_split_into_words""" , __UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase_= self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : int , __UpperCAmelCase : List[Any]=None ) -> int:
UpperCAmelCase_= [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_= [self.sep_token_id]
UpperCAmelCase_= [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 277 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''CLIPFeatureExtractor''']
__A = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 277 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : str = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k
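# Worked example of the mapping above:
#     rename_state_dict_key("encoder.attention.q_lin.weight")
#     -> "encoder.self_attn.q_proj.weight"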
def rename_layernorm_keys(sd):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding', 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v
snake_case : int = ['''START''']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
snake_case : int = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 240 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
snake_case : List[Any] = logging.getLogger(__name__)
def __lowercase ( __lowerCAmelCase : str=2 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[str]=1_6 , __lowerCAmelCase : int = 1_0 , __lowerCAmelCase : int = 2 ):
def get_dataset(__lowerCAmelCase : Dict ):
a__ = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__lowerCAmelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
a__ = get_dataset(__lowerCAmelCase )
a__ = get_dataset(__lowerCAmelCase )
a__ = DataLoader(__lowerCAmelCase , shuffle=__lowerCAmelCase , batch_size=__lowerCAmelCase , num_workers=4 )
a__ = DataLoader(__lowerCAmelCase , shuffle=__lowerCAmelCase , batch_size=__lowerCAmelCase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=None ):
a__ = []
for epoch in range(__lowerCAmelCase ):
# Train quickly
model.train()
for batch in dataloader:
a__ , a__ = batch
a__ = model(__lowerCAmelCase )
a__ = torch.nn.functional.mse_loss(__lowerCAmelCase , __lowerCAmelCase )
accelerator.backward(__lowerCAmelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
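# train() returns the sequence of random.random() draws made during training;
# the tests below compare these lists across save/load cycles to check that
# resuming from a checkpoint reproduces the original run deterministically.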
class snake_case_ (nn.Module ):
def __init__( self :Any ) -> Union[str, Any]:
super().__init__()
a__ = nn.Parameter(torch.randn(1 ) )
a__ = nn.Parameter(torch.randn(1 ) )
def lowerCamelCase__( self :List[str] ,__snake_case :Union[str, Any] ) -> str:
return x * self.a + self.b
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Tuple ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(total_limit=1 ,project_dir=__snake_case ,automatic_checkpoint_naming=__snake_case )
# Train baseline
a__ = Accelerator(project_config=__snake_case )
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 )
def lowerCamelCase__( self :List[Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
# Train baseline
a__ = Accelerator()
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
a__ = os.path.join(__snake_case ,'initial' )
accelerator.save_state(__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
a__ = train(3 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
# Train partially
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = Accelerator()
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
accelerator.load_state(__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
a__ = train(2 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save everything
a__ = os.path.join(__snake_case ,'checkpoint' )
accelerator.save_state(__snake_case )
# Load everything back in and make sure all states work
accelerator.load_state(__snake_case )
test_rands += train(1 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
def lowerCamelCase__( self :str ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(automatic_checkpoint_naming=__snake_case )
# Train baseline
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
accelerator.save_state()
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
a__ = train(3 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
# Train partially
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=__snake_case )
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
accelerator.load_state(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_0' ) )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
a__ = train(2 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_1' ) )
test_rands += train(1 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
a__ = torch.tensor([1, 2, 3] )
a__ = torch.tensor([2, 3, 4] )
a__ = DummyModel()
a__ = torch.optim.Adam(net.parameters() )
a__ = Accelerator()
with self.assertRaises(__snake_case ) as ve:
accelerator.register_for_checkpointing(__snake_case ,__snake_case ,__snake_case ,__snake_case )
a__ = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def lowerCamelCase__( self :List[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ = torch.optim.lr_scheduler.StepLR(__snake_case ,step_size=1 ,gamma=0.99 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(automatic_checkpoint_naming=__snake_case )
# Train baseline
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ , a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
accelerator.save_state()
a__ = scheduler.state_dict()
train(3 ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
self.assertNotEqual(__snake_case ,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_0' ) )
self.assertEqual(__snake_case ,scheduler.state_dict() )
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = ProjectConfiguration(automatic_checkpoint_naming=__snake_case ,total_limit=2 )
# Train baseline
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ = accelerator.prepare(__snake_case )
            # Save 11 states; with total_limit=2 only the last two checkpoints are kept:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_10' ) ) )
@require_cuda
def lowerCamelCase__( self :Dict ) -> str:
a__ = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(__snake_case ,env=os.environ.copy() )
if __name__ == "__main__":
snake_case : Tuple = '''/tmp/accelerate/state_checkpointing'''
snake_case : str = DummyModel()
snake_case : List[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
snake_case : Union[str, Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
snake_case , snake_case : str = dummy_dataloaders()
snake_case : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
snake_case : Dict = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
snake_case , snake_case , snake_case , snake_case , snake_case : List[str] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
snake_case , snake_case : Any = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
snake_case : Any = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
snake_case : Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
snake_case : int = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
snake_case : Optional[int] = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
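    # --- A minimal standalone sketch of the same save/load pattern, assuming only
    # --- that `accelerate` and `torch` are installed; the tiny Linear model and the
    # --- /tmp/ckpt_demo path are placeholder choices, not part of the original test.
    demo_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
    demo_accelerator = Accelerator(project_dir="/tmp/ckpt_demo", project_config=demo_config)
    demo_model = torch.nn.Linear(4, 2)
    demo_optimizer = torch.optim.Adam(demo_model.parameters(), lr=1e-3)
    demo_model, demo_optimizer = demo_accelerator.prepare(demo_model, demo_optimizer)
    demo_accelerator.save_state()  # writes /tmp/ckpt_demo/checkpoints/checkpoint_0
    demo_accelerator.load_state(os.path.join("/tmp/ckpt_demo", "checkpoints", "checkpoint_0"))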
| 240 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/xglm-564M''': 2048,
}
class XGLMTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''' , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        sp_size = len(self.sp_model )
        madeup_words_dict = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words_dict )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        if token_ids_b is None:
            return [self.sep_token_id] + token_ids_a
        sep = [self.sep_token_id]
        return sep + token_ids_a + sep + sep + token_ids_b
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a ))
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b ))
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return len(sep + token_ids_a ) * [0]
        return len(sep + token_ids_a + sep + sep + token_ids_b ) * [0]
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
 | 352 |
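# A minimal sketch of the fairseq-offset trick used by the tokenizer above,
# assuming only a toy spm-style id table: ids 0-3 are reserved for
# <s>/<pad>/</s>/<unk> and every SentencePiece id is shifted up by 1 so the
# two vocabularies stay aligned.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def token_to_id(token, spm_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm id 0 is <unk>; anything else gets shifted by the offset
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

print(token_to_id(",", {",": 3}))  # 4, matching the alignment table in the comment above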
'''simple docstring'''
from collections import Counter
from timeit import timeit
def a ( __a = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def a ( __a = "" ) -> bool:
'''simple docstring'''
if len(__a ) == 0:
return True
UpperCamelCase__ :List[Any] = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
UpperCamelCase__ :dict[str, int] = {}
for character in lower_case_input_str:
UpperCamelCase__ :Optional[int] = character_freq_dict.get(__a , 0 ) + 1
UpperCamelCase__ :List[str] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def a ( __a = "" ) -> None:
'''simple docstring'''
print('''\nFor string = ''' , __a , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(__a ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(__a ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
    check_str = input(
        '''Enter string to determine if it can be rearranged as a palindrome or not: '''
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""") | 219 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    """simple docstring"""
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class _a ( UpperCamelCase__ ):
def __init__( self: Any , UpperCamelCase_: VQModel , UpperCamelCase_: UNetaDModel , UpperCamelCase_: Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
@torch.no_grad()
def __call__( self: Dict , UpperCamelCase_: Union[torch.Tensor, PIL.Image.Image] = None , UpperCamelCase_: Optional[int] = 1 , UpperCamelCase_: Optional[int] = 100 , UpperCamelCase_: Optional[float] = 0.0 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , torch.Tensor ):
lowercase__ = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCamelCase_ )}' )
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowercase__ = preprocess(UpperCamelCase_ )
lowercase__ , lowercase__ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowercase__ = (batch_size, self.unet.config.in_channels // 2, height, width)
lowercase__ = next(self.unet.parameters() ).dtype
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=UpperCamelCase_ )
lowercase__ = image.to(device=self.device , dtype=UpperCamelCase_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
lowercase__ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for t in self.progress_bar(UpperCamelCase_ ):
# concat latents and low resolution image in the channel dimension.
lowercase__ = torch.cat([latents, image] , dim=1 )
lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
# predict the noise residual
lowercase__ = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
# decode the image latents with the VQVAE
lowercase__ = self.vqvae.decode(UpperCamelCase_ ).sample
lowercase__ = torch.clamp(UpperCamelCase_ , -1.0 , 1.0 )
lowercase__ = image / 2 + 0.5
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
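# A small sketch of the two preprocessing steps above, assuming only numpy:
# each side is snapped down to a multiple of 32, and pixel values are mapped
# from [0, 255] to the [-1, 1] range the VQ-VAE expects.
import numpy as np

w, h = 517, 389
w, h = (x - x % 32 for x in (w, h))
print(w, h)  # 512 384

pixels = np.array([[0.0, 127.5, 255.0]], dtype=np.float32) / 255.0
print(2.0 * pixels - 1.0)  # [[-1.  0.  1.]]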
| 110 |
from maths.prime_factors import prime_factors
def __lowerCamelCase ( number : int ):
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
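    # The function above is the Liouville lambda: +1 when n has an even number
    # of prime factors counted with multiplicity, -1 when odd. The helper below
    # is a self-contained re-implementation for cross-checking, not the
    # prime_factors import used above.
    def liouville(n: int) -> int:
        count, d = 0, 2
        while d * d <= n:
            while n % d == 0:
                n //= d
                count += 1
            d += 1
        if n > 1:
            count += 1
        return -1 if count % 2 else 1

    print([liouville(n) for n in (1, 2, 4, 12)])  # [1, -1, 1, -1]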
| 94 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _a ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, **A ):
'''simple docstring'''
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self, A, **A ):
'''simple docstring'''
return super().__call__(_a, **_a )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = {}
if "candidate_labels" in kwargs:
SCREAMING_SNAKE_CASE : List[str] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
SCREAMING_SNAKE_CASE : Any = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def UpperCamelCase_ ( self, A, A=None, A="This is a sound of {}." ):
'''simple docstring'''
if isinstance(_a, _a ):
if audio.startswith('http://' ) or audio.startswith('https://' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
SCREAMING_SNAKE_CASE : List[str] = requests.get(_a ).content
else:
with open(_a, 'rb' ) as f:
SCREAMING_SNAKE_CASE : int = f.read()
if isinstance(_a, _a ):
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_read(_a, self.feature_extractor.sampling_rate )
if not isinstance(_a, np.ndarray ):
raise ValueError('We expect a numpy ndarray as input' )
if len(audio.shape ) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
SCREAMING_SNAKE_CASE : Tuple = self.feature_extractor(
[audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='pt' )
SCREAMING_SNAKE_CASE : List[Any] = candidate_labels
SCREAMING_SNAKE_CASE : Tuple = [hypothesis_template.format(_a ) for x in candidate_labels]
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(_a, return_tensors=self.framework, padding=_a )
SCREAMING_SNAKE_CASE : List[Any] = [text_inputs]
return inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_inputs.pop('candidate_labels' )
SCREAMING_SNAKE_CASE : List[Any] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0], _a ):
SCREAMING_SNAKE_CASE : str = text_inputs[0]
else:
# Batching case.
SCREAMING_SNAKE_CASE : List[str] = text_inputs[0][0]
SCREAMING_SNAKE_CASE : List[str] = self.model(**_a, **_a )
SCREAMING_SNAKE_CASE : Tuple = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_audio,
}
return model_outputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = model_outputs.pop('candidate_labels' )
SCREAMING_SNAKE_CASE : List[Any] = model_outputs['logits'][0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE : List[Any] = logits.softmax(dim=0 )
SCREAMING_SNAKE_CASE : str = probs.tolist()
else:
raise ValueError('`tf` framework not supported.' )
SCREAMING_SNAKE_CASE : Tuple = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(_a, _a ), key=lambda x : -x[0] )
]
return result
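# The zero-shot scoring step above in miniature: format each candidate label
# with the hypothesis template, then softmax the audio-vs-text logits over the
# labels. A toy numpy sketch with made-up logits (no model involved):
import numpy as np

candidate_labels = ["dog barking", "rain"]
hypotheses = ["This is a sound of {}.".format(x) for x in candidate_labels]
logits = np.array([3.1, 0.4])  # pretend logits_per_audio for one clip
probs = np.exp(logits - logits.max()) / np.exp(logits - logits.max()).sum()
result = sorted(
    ({"score": float(s), "label": l} for s, l in zip(probs, candidate_labels)),
    key=lambda x: -x["score"],
)
print(result[0]["label"])  # dog barking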
| 361 |
'''simple docstring'''
def binomial_coefficient( n: int , k: int ):
    """simple docstring"""
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result
def catalan_number( node_count: int ):
    """simple docstring"""
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial( n: int ):
    """simple docstring"""
    if n < 0:
        raise ValueError('factorial() not defined for negative values' )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def binary_tree_count( node_count: int ):
    """simple docstring"""
    return catalan_number(node_count ) * factorial(node_count )
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
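    # Sanity numbers for the formulas above: Catalan(n) = C(2n, n) / (n + 1)
    # counts binary search trees on n nodes, and Catalan(n) * n! counts binary
    # trees. Computed here with the standard library for comparison.
    import math

    demo_n = 3
    demo_catalan = math.comb(2 * demo_n, demo_n) // (demo_n + 1)
    print(demo_catalan)                           # 5 binary search trees
    print(demo_catalan * math.factorial(demo_n))  # 30 binary trees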
| 246 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]:
lowerCamelCase__ : Optional[int] = SwinConfig()
lowerCamelCase__ : Any = swin_name.split("""_""" )
lowerCamelCase__ : str = name_split[1]
lowerCamelCase__ : int = int(name_split[4] )
lowerCamelCase__ : Tuple = int(name_split[3][-1] )
if model_size == "tiny":
lowerCamelCase__ : Any = 96
lowerCamelCase__ : Optional[Any] = (2, 2, 6, 2)
lowerCamelCase__ : Optional[Any] = (3, 6, 12, 24)
elif model_size == "small":
lowerCamelCase__ : List[Any] = 96
lowerCamelCase__ : Dict = (2, 2, 18, 2)
lowerCamelCase__ : Optional[int] = (3, 6, 12, 24)
elif model_size == "base":
lowerCamelCase__ : Tuple = 128
lowerCamelCase__ : Union[str, Any] = (2, 2, 18, 2)
lowerCamelCase__ : Optional[int] = (4, 8, 16, 32)
else:
lowerCamelCase__ : int = 192
lowerCamelCase__ : List[str] = (2, 2, 18, 2)
lowerCamelCase__ : Any = (6, 12, 24, 48)
if "in22k" in swin_name:
lowerCamelCase__ : int = 21841
else:
lowerCamelCase__ : List[str] = 1000
lowerCamelCase__ : Tuple = """huggingface/label-files"""
lowerCamelCase__ : Dict = """imagenet-1k-id2label.json"""
lowerCamelCase__ : Tuple = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase__ : Optional[int] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : List[str] = idalabel
lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : str = img_size
lowerCamelCase__ : Any = num_classes
lowerCamelCase__ : List[Any] = embed_dim
lowerCamelCase__ : Union[str, Any] = depths
lowerCamelCase__ : Any = num_heads
lowerCamelCase__ : Dict = window_size
return config
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[Any]:
if "patch_embed.proj" in name:
lowerCamelCase__ : int = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowerCamelCase__ : str = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
lowerCamelCase__ : str = """encoder.""" + name
if "attn.proj" in name:
lowerCamelCase__ : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowerCamelCase__ : Tuple = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCamelCase__ : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCamelCase__ : Optional[int] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase__ : List[str] = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "norm.weight":
lowerCamelCase__ : Union[str, Any] = """layernorm.weight"""
if name == "norm.bias":
lowerCamelCase__ : Tuple = """layernorm.bias"""
if "head" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""head""" , """classifier""" )
else:
lowerCamelCase__ : List[str] = """swin.""" + name
return name
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : Optional[Any] = orig_state_dict.pop(UpperCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
lowerCamelCase__ : List[Any] = key.split(""".""" )
lowerCamelCase__ : List[str] = int(key_split[1] )
lowerCamelCase__ : List[Any] = int(key_split[3] )
lowerCamelCase__ : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCamelCase__ : Any = val[:dim, :]
lowerCamelCase__ : Tuple = val[
dim : dim * 2, :
]
lowerCamelCase__ : Tuple = val[-dim:, :]
else:
lowerCamelCase__ : int = val[
:dim
]
lowerCamelCase__ : Any = val[
dim : dim * 2
]
lowerCamelCase__ : Any = val[
-dim:
]
else:
lowerCamelCase__ : Tuple = val
return orig_state_dict
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = timm.create_model(UpperCamelCase , pretrained=UpperCamelCase )
timm_model.eval()
lowerCamelCase__ : Union[str, Any] = get_swin_config(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = SwinForImageClassification(UpperCamelCase )
model.eval()
lowerCamelCase__ : Optional[int] = convert_state_dict(timm_model.state_dict() , UpperCamelCase )
model.load_state_dict(UpperCamelCase )
lowerCamelCase__ : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ : Tuple = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
lowerCamelCase__ : Union[str, Any] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
lowerCamelCase__ : Tuple = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase__ : List[str] = timm_model(inputs["""pixel_values"""] )
lowerCamelCase__ : Union[str, Any] = model(**UpperCamelCase ).logits
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 )
print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
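    # The qkv handling above in miniature: timm stores attention weights as one
    # fused (3*dim, dim) matrix, which the conversion splits into equal thirds
    # for query, key, and value. A toy tensor demo (assumes torch only):
    import torch

    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)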
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : Dict ={
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[Any] =[
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_A : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 | 1 |
'''simple docstring'''
from torch import nn
def lowerCAmelCase_ ( act_fn ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'Unsupported activation function: {act_fn}' )
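# Example usage of the factory above (named `lowerCAmelCase_` in this copy);
# assumes torch is installed. SiLU(x) = x * sigmoid(x).
import torch

act = lowerCAmelCase_("silu")
print(act(torch.tensor([-1.0, 0.0, 1.0])))  # tensor([-0.2689, 0.0000, 0.7311])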
| 358 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid( _outputs ):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax( _outputs ):
    '''simple docstring'''
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class A ( __snake_case ):
__magic_name__ = '''sigmoid'''
__magic_name__ = '''softmax'''
__magic_name__ = '''none'''
@add_end_docstrings(
__snake_case , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class A ( __snake_case ):
__magic_name__ = False
__magic_name__ = ClassificationFunction.NONE
def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : Optional[Any] = tokenizer_kwargs
A : int = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
A : int = self.model.config.return_all_scores
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k is None:
A : Union[str, Any] = top_k
A : Dict = False
elif return_all_scores is not None:
warnings.warn(
                '''`return_all_scores` is now deprecated, if you want similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , SCREAMING_SNAKE_CASE , )
if return_all_scores:
A : Optional[int] = None
else:
A : Dict = 1
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Dict = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
A : int = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : str = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
A : Any = '''top_k''' not in kwargs
if isinstance(args[0] , SCREAMING_SNAKE_CASE ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]:
"""simple docstring"""
A : List[Any] = self.framework
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return self.tokenizer(**SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
return self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.model(**SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=True ) -> List[str]:
"""simple docstring"""
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
A : Optional[int] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
A : Any = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
A : Optional[int] = self.model.config.function_to_apply
else:
A : Optional[int] = ClassificationFunction.NONE
A : Any = model_outputs['''logits'''][0]
A : List[Any] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
A : int = sigmoid(SCREAMING_SNAKE_CASE )
elif function_to_apply == ClassificationFunction.SOFTMAX:
A : Any = softmax(SCREAMING_SNAKE_CASE )
elif function_to_apply == ClassificationFunction.NONE:
A : int = outputs
else:
raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
A : int = [
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(SCREAMING_SNAKE_CASE )
]
if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
if top_k is not None:
A : Union[str, Any] = dict_scores[:top_k]
return dict_scores
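# The postprocessing fork above in brief: multi-label (or single-logit) heads
# go through the element-wise sigmoid, multi-class heads through the softmax
# defined at the top of this file. A quick numeric illustration:
example_logits = np.array([[2.0, 0.5, -1.0]])
print(sigmoid(example_logits).round(2))  # [[0.88 0.62 0.27]] -> independent scores
print(softmax(example_logits).round(2))  # [[0.79 0.18 0.04]] -> row sums to ~1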
| 311 | 0 |
def UpperCAmelCase__ ( moles, volume, nfactor ):
    return round(float(moles / volume ) * nfactor )
def UpperCAmelCase__ ( moles, temperature, volume ):
    return round(float((moles * 0.0_821 * temperature) / (volume) ) )
def UpperCAmelCase__ ( moles, temperature, pressure ):
    return round(float((moles * 0.0_821 * temperature) / (pressure) ) )
def UpperCAmelCase__ ( pressure, volume, moles ):
    return round(float((pressure * volume) / (0.0_821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
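    # A worked check of the ideal-gas rearrangements above, with R = 0.0821
    # L*atm/(mol*K): 2 mol at 300 K in 10 L gives P = nRT/V = 4.926 atm, which
    # the rounded helpers report as 5.
    print(round(float((2 * 0.0_821 * 300) / 10)))  # 5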
| 236 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
A_ :List[str] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
A_ :Any = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
A_ :Tuple = '''
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time, in seconds, allowed for each candidate program (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
A_ :List[str] = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
A_ :Tuple = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
        """simple docstring"""
        if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.' )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result) )
        total , correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {f'pass@{k}': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k( num_samples , num_correct , k ):
    def estimator( n: int , c: int , k: int ) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
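# The estimator above computes pass@k = 1 - C(n-c, k) / C(n, k) via a running
# product, avoiding huge binomials. A quick cross-check against the direct
# formula (demo names are fresh, not part of the metric):
from math import comb

demo_n, demo_c, demo_k = 10, 3, 2
direct = 1.0 - comb(demo_n - demo_c, demo_k) / comb(demo_n, demo_k)
via_product = 1.0 - np.prod(1.0 - demo_k / np.arange(demo_n - demo_c + 1, demo_n + 1))
assert abs(direct - via_product) < 1e-9
print(round(direct, 4))  # 0.5333: odds that at least one of 2 sampled candidates passes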
| 71 | 0 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
a_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __init__( self : int , *__lowercase : List[str] , **__lowercase : Dict ) -> List[str]:
super().__init__(*__lowercase , **__lowercase )
requires_backends(self , '''decord''' )
self.check_model_type(__lowercase )
    def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['''frame_sampling_rate'''] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['''num_frames'''] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params
def __call__( self : Tuple , __lowercase : Union[str, List[str]] , **__lowercase : Any ) -> Optional[int]:
return super().__call__(__lowercase , **__lowercase )
    def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('''http://''' ) or video.startswith('''https://''' ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 352 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
a_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
a_ = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
a_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self : List[Any] ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(predictions , references )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(predictions , references )[0] )}
 | 222 | 0 |
from math import isqrt
def calculate_prime_numbers( max_number: int ) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( max_number: int = 1_0**8 ) -> int:
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
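    # A brute-force cross-check of the two-pointer count above: a semiprime
    # below N is a product p*q of primes with p <= q. Both this loop and
    # solution(100) give 34. `_is_prime` is a fresh helper for the demo only.
    def _is_prime(n: int) -> bool:
        return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

    demo_count = sum(
        1
        for p in range(2, 100)
        for q in range(p, 100)
        if _is_prime(p) and _is_prime(q) and p * q < 100
    )
    print(demo_count)  # 34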
| 329 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ :str = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Union[str, Any] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 329 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 152 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Dict ) -> Union[str, Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__lowerCamelCase = [[1, 2, 4], [1, 2, 3, 4]]
__lowerCamelCase = DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ )
self.assertTrue(isinstance(dc.token_ids , SCREAMING_SNAKE_CASE__ ) )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __A ( self : Tuple ) -> Union[str, Any]:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__lowerCamelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ ) # fails here
def __A ( self : int ) -> Any:
__lowerCamelCase = [[1, 2, 3], [1, 2, 4]]
__lowerCamelCase = DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(1 )
__lowerCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(2 )
__lowerCamelCase = stepped is True and completed is False and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(3 )
__lowerCamelCase = stepped is True and completed is True and reset is False
self.assertTrue(SCREAMING_SNAKE_CASE__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __A ( self : int ) -> Optional[int]:
__lowerCamelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__lowerCamelCase = DisjunctiveConstraint(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
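# A compact view of the behavior the tests above verify: the constraint walks
# a trie of the allowed sequences and reports (stepped, completed, reset) at
# each token. Assumes torch is available, as the import guard above requires.
demo_dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for demo_token in (1, 2, 4):
    stepped, completed, reset = demo_dc.update(demo_token)
print(demo_dc.completed, demo_dc.current_seq)  # True [1, 2, 4]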
| 270 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Any = "sshleifer/student_marian_en_ro_6_1"
SCREAMING_SNAKE_CASE__ : Tuple = "sshleifer/tiny-mbart"
@require_torch
class lowerCAmelCase__ ( __lowercase ):
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , ) -> Optional[int]:
__lowerCamelCase = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=SCREAMING_SNAKE_CASE__ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , predict_with_generate=SCREAMING_SNAKE_CASE__ , do_train=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__lowerCamelCase = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __A ( self : Optional[int] ) -> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __A ( self : int ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@require_torch_multi_gpu
def __A ( self : Optional[Any] ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Optional[int] ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Tuple ) -> Any:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(
distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@require_apex
@require_torch_gpu
def __A ( self : Union[str, Any] ) -> List[str]:
# XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() from the same
# program, and it breaks other tests that run from the same pytest worker; therefore until
# this is sorted out it must be run only in an external program - that is, distributed=True
# in this test and only under one or more gpus. If we ever want a cpu run, a special test
# will be needed.
#
# Specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time
# via a 2nd main() call, it botches the subsequent eval.
#
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test a 2nd time - this used to produce 'eval_loss': nan
# to reproduce the problem, set distributed=False
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__lowerCamelCase = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__lowerCamelCase = experiments[experiment_id]
__lowerCamelCase = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__lowerCamelCase = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**SCREAMING_SNAKE_CASE__ , extra_args_str=data['''extra_args_str'''] )
__lowerCamelCase = len(re.findall(SCREAMING_SNAKE_CASE__ , cl.err ) )
self.assertEqual(SCREAMING_SNAKE_CASE__ , data['''n_matches'''] )
@slow
def __A ( self : Any ) -> Optional[Any]:
__lowerCamelCase = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=10 , distributed=SCREAMING_SNAKE_CASE__ , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
__lowerCamelCase = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
# test if do_predict saves generations and metrics
__lowerCamelCase = os.listdir(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {os.path.basename(SCREAMING_SNAKE_CASE__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __A ( self : Optional[int] ) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(SCREAMING_SNAKE_CASE__ : str ) -> Tuple[int, float]:
__lowerCamelCase = '''--skip_memory_metrics 0'''
__lowerCamelCase = self.run_trainer(
max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=1 , optim=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , n_gpus_to_use=1 , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(Path(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
__lowerCamelCase = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
__lowerCamelCase = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__lowerCamelCase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__lowerCamelCase = gpu_peak_mem_orig + gpu_alloc_mem_orig
__lowerCamelCase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__lowerCamelCase = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are in `nn.Embedding`,
# which doesn't get quantized and remains in fp32. Therefore only ~25M parameters are
# quantized to 2 bytes, and the diff in optimizer memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
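# Worked out: 25e6 params * (8 - 2) bytes = 150e6 bytes ≈ 143 MiB, hence the
# ~150MB figure above and the 120MB lower bound below.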
__lowerCamelCase = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 3e-3 , SCREAMING_SNAKE_CASE__ : str = "adafactor" , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = None , ) -> List[Any]:
__lowerCamelCase = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(SCREAMING_SNAKE_CASE__ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(SCREAMING_SNAKE_CASE__ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__lowerCamelCase = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(SCREAMING_SNAKE_CASE__ )}
'''.split()
__lowerCamelCase = '''
--do_predict
'''.split()
__lowerCamelCase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__lowerCamelCase = get_gpu_count()
__lowerCamelCase = get_torch_dist_unique_port()
__lowerCamelCase = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__lowerCamelCase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() )
else:
__lowerCamelCase = ['''run_translation.py'''] + args
with patch.object(SCREAMING_SNAKE_CASE__ , '''argv''' , SCREAMING_SNAKE_CASE__ ):
main()
return output_dir
| 270 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
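# Sketch: once this file is installed as `transformers/models/groupvit/__init__.py`,
# attribute access is what triggers the gated imports above, e.g.:
#   from transformers import GroupViTModel   # runs the torch-gated branch on first access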
| 358 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
snake_case_ = 3
def _lowerCAmelCase ( lowercase_ ):
print('Generating primitive root of p' )
while True:
UpperCAmelCase = random.randrange(3 , lowercase_ )
if pow(lowercase_ , 2 , lowercase_ ) == 1:
continue
if pow(lowercase_ , lowercase_ , lowercase_ ) == 1:
continue
return g
def _lowerCAmelCase ( lowercase_ ):
print('Generating prime p...' )
UpperCAmelCase = rabin_miller.generate_large_prime(lowercase_ ) # select large prime number.
UpperCAmelCase = primitive_root(lowercase_ ) # one primitive root on modulo p.
UpperCAmelCase = random.randrange(3 , lowercase_ ) # private_key -> have to be greater than 2 for safety.
UpperCAmelCase = cryptomath.find_mod_inverse(pow(lowercase_ , lowercase_ , lowercase_ ) , lowercase_ )
UpperCAmelCase = (key_size, e_a, e_a, p)
UpperCAmelCase = (key_size, d)
return public_key, private_key
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
UpperCAmelCase , UpperCAmelCase = generate_key(lowercase_ )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , 'w' ) as fo:
fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , 'w' ) as fo:
fo.write(F"""{private_key[0]},{private_key[1]}""" )
def _lowerCAmelCase ( ):
print('Making key files...' )
make_key_files('elgamal' , 2048 )
print('Key files generation successful' )
if __name__ == "__main__":
main()
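# For reference, textbook ElGamal as a hedged sketch (the exact key-tuple layout
# above follows this script and differs slightly from the textbook form):
#   key gen:  h = pow(g, x, p)                              # public (p, g, h), private x
#   encrypt:  c1, c2 = pow(g, k, p), m * pow(h, k, p) % p   # random ephemeral k
#   decrypt:  m = c2 * pow(c1, p - 1 - x, p) % p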
| 181 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : int = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __UpperCamelCase ( a__ ):
lowerCamelCase : Optional[int] ="""gptj"""
lowerCamelCase : str ={
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowerCAmelCase__=5_0400 , lowerCAmelCase__=2048 , lowerCAmelCase__=4096 , lowerCAmelCase__=28 , lowerCAmelCase__=16 , lowerCAmelCase__=64 , lowerCAmelCase__=None , lowerCAmelCase__="gelu_new" , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , lowerCAmelCase__=5_0256 , lowerCAmelCase__=5_0256 , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Tuple:
a : List[Any] = vocab_size
a : Tuple = n_positions
a : Optional[Any] = n_embd
a : Any = n_layer
a : Tuple = n_head
a : Optional[Any] = n_inner
a : Dict = rotary_dim
a : Optional[int] = activation_function
a : Any = resid_pdrop
a : Optional[Any] = embd_pdrop
a : Union[str, Any] = attn_pdrop
a : Optional[int] = layer_norm_epsilon
a : Union[str, Any] = initializer_range
a : Optional[Any] = use_cache
a : List[str] = bos_token_id
a : List[Any] = eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , **lowerCAmelCase__ )
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = "default" , lowerCAmelCase__ = None , lowerCAmelCase__ = False , ) -> Union[str, Any]:
super().__init__(lowerCAmelCase__ , task=lowerCAmelCase__ , patching_specs=lowerCAmelCase__ , use_past=lowerCAmelCase__ )
if not getattr(self._config , "pad_token_id" , lowerCAmelCase__ ):
# TODO: how to do that better?
a : Optional[int] = 0
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
a : Any = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="inputs" )
a : int = {0: "batch", 1: "past_sequence + sequence"}
else:
a : Union[str, Any] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def __a ( self ) -> int:
return self._config.n_layer
@property
def __a ( self ) -> int:
return self._config.n_head
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]:
a : Optional[int] = super(lowerCAmelCase__ , self ).generate_dummy_inputs(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
# We need to order the inputs in the way they appear in the forward()
a : Optional[Any] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
a, a : List[Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
a : Optional[int] = seqlen + 2
a : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
a : List[str] = [
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(self.num_layers )
]
a : List[Any] = common_inputs["attention_mask"]
if self.use_past:
a : int = ordered_inputs["attention_mask"].dtype
a : List[Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__ )] , dim=1 )
return ordered_inputs
@property
def __a ( self ) -> int:
return 13
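# Hedged export sketch using the legacy `transformers.onnx` API (the config class
# above is shown with an obfuscated name; in the original it is `GPTJOnnxConfig`):
#   from pathlib import Path
#   from transformers import AutoModel, AutoTokenizer
#   from transformers.onnx import export
#   tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#   model = AutoModel.from_pretrained("EleutherAI/gpt-j-6B")
#   onnx_config = GPTJOnnxConfig(model.config, task="default")
#   export(tok, model, onnx_config, onnx_config.default_onnx_opset, Path("gptj.onnx"))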
| 105 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
a : Union[str, Any] = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ( ) ->Tuple:
'''simple docstring'''
a : Dict = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=_lowercase , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=_lowercase , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=_lowercase , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=_lowercase , default="data/dump" , help="The dump file prefix." )
a : Dict = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
a : Optional[Any] = BertTokenizer.from_pretrained(args.tokenizer_name )
a : str = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
a : List[str] = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
a : Tuple = RobertaTokenizer.from_pretrained(args.tokenizer_name )
a : Union[str, Any] = tokenizer.special_tokens_map["cls_token"] # `<s>`
a : str = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
a : List[Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
a : Optional[int] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
a : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
a : List[Any] = fp.readlines()
logger.info("Start encoding" )
logger.info(F"""{len(_lowercase )} examples to process.""" )
a : Optional[Any] = []
a : Optional[Any] = 0
a : int = 1_0000
a : Dict = time.time()
for text in data:
a : List[Any] = F"""{bos} {text.strip()} {sep}"""
a : Optional[int] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
rslt.append(_lowercase )
iter += 1
if iter % interval == 0:
a : Optional[Any] = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
a : Optional[Any] = time.time()
logger.info("Finished binarization" )
logger.info(F"""{len(_lowercase )} examples processed.""" )
a : Optional[int] = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
a : Tuple = tokenizer.vocab_size
if vocab_size < (1 << 16):
a : Optional[int] = [np.uintaa(_lowercase ) for d in rslt]
else:
a : Optional[Any] = [np.intaa(_lowercase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(_lowercase , "wb" ) as handle:
pickle.dump(rslt_ , _lowercase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
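# Example invocation (script path is hypothetical; the flags match the parser above):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text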
| 105 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
__a = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
__a = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
__a = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__a = model(_a )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _a , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self ):
__a = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
__a = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
__a = torch.Size((1, 12, 1_024) ) # batch_size, sequence_length, embedding_vector_dim
__a = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__a = model(_a )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _a , atol=1E-3 ) )
| 11 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowercase ( lowerCAmelCase__ : dict ) -> tuple:
return (data["data"], data["target"])
def lowercase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ) -> np.ndarray:
__a = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCAmelCase__ , lowerCAmelCase__ )
# Predict target for test data
__a = xgb.predict(lowerCAmelCase__ )
__a = predictions.reshape(len(lowerCAmelCase__ ) , 1 )
return predictions
def lowercase ( ) -> None:
__a = fetch_california_housing()
__a , __a = data_handling(lowerCAmelCase__ )
__a , __a , __a , __a = train_test_split(
lowerCAmelCase__ , lowerCAmelCase__ , test_size=0.25 , random_state=1 )
__a = xgboost(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
print(f'''Mean Square Error : {mean_squared_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 11 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _UpperCAmelCase( a__ ):
lowercase__ = 'SpeechT5FeatureExtractor'
lowercase__ = 'SpeechT5Tokenizer'
def __init__( self , __a , __a) -> Optional[int]:
'''simple docstring'''
super().__init__(UpperCamelCase_ , UpperCamelCase_)
def __call__( self , *__a , **__a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = kwargs.pop('''audio''' , UpperCamelCase_)
_UpperCamelCase = kwargs.pop('''text''' , UpperCamelCase_)
_UpperCamelCase = kwargs.pop('''text_target''' , UpperCamelCase_)
_UpperCamelCase = kwargs.pop('''audio_target''' , UpperCamelCase_)
_UpperCamelCase = kwargs.pop('''sampling_rate''' , UpperCamelCase_)
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
if audio is not None:
_UpperCamelCase = self.feature_extractor(UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_)
elif text is not None:
_UpperCamelCase = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_)
else:
_UpperCamelCase = None
if audio_target is not None:
_UpperCamelCase = self.feature_extractor(audio_target=UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_)
_UpperCamelCase = targets['''input_values''']
elif text_target is not None:
_UpperCamelCase = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_)
_UpperCamelCase = targets['''input_ids''']
else:
_UpperCamelCase = None
if inputs is None:
return targets
if targets is not None:
_UpperCamelCase = labels
_UpperCamelCase = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
_UpperCamelCase = decoder_attention_mask
return inputs
def UpperCAmelCase ( self , *__a , **__a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = kwargs.pop('''input_values''' , UpperCamelCase_)
_UpperCamelCase = kwargs.pop('''input_ids''' , UpperCamelCase_)
_UpperCamelCase = kwargs.pop('''labels''' , UpperCamelCase_)
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')
if input_values is not None:
_UpperCamelCase = self.feature_extractor.pad(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_)
elif input_ids is not None:
_UpperCamelCase = self.tokenizer.pad(UpperCamelCase_ , **UpperCamelCase_)
else:
_UpperCamelCase = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCamelCase_ , UpperCamelCase_) and "input_ids" in labels[0]):
_UpperCamelCase = self.tokenizer.pad(UpperCamelCase_ , **UpperCamelCase_)
_UpperCamelCase = targets['''input_ids''']
else:
_UpperCamelCase = self.feature_extractor.feature_size
_UpperCamelCase = self.feature_extractor.num_mel_bins
_UpperCamelCase = self.feature_extractor.pad(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_)
_UpperCamelCase = feature_size_hack
_UpperCamelCase = targets['''input_values''']
else:
_UpperCamelCase = None
if inputs is None:
return targets
if targets is not None:
_UpperCamelCase = labels
_UpperCamelCase = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
_UpperCamelCase = decoder_attention_mask
return inputs
def UpperCAmelCase ( self , *__a , **__a) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_)
def UpperCAmelCase ( self , *__a , **__a) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_)
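# Hedged usage sketch ("microsoft/speecht5_tts" is a Hub checkpoint that ships both
# sub-components this processor wraps; `waveform` is a 16 kHz float array):
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")      # -> input_ids
#   batch = processor(text="Hello world", audio_target=waveform,
#                     sampling_rate=16000, return_tensors="pt")      # adds labels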
| 194 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class snake_case_:
def __init__( self : str , UpperCamelCase_ : int=None , UpperCamelCase_ : List[str]=None ):
# Input as list
lowerCAmelCase : str = list(poly_a or [0] )[:]
lowerCAmelCase : Any = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowerCAmelCase : Optional[int] = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowerCAmelCase : Union[str, Any] = len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowerCAmelCase : str = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowerCAmelCase : int = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowerCAmelCase : int = self.__multiply()
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str ):
lowerCAmelCase : Optional[Any] = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCamelCase_ ) <= 1:
return dft[0]
#
lowerCAmelCase : Tuple = self.c_max_length // 2
while next_ncol > 0:
lowerCAmelCase : Dict = [[] for i in range(UpperCamelCase_ )]
lowerCAmelCase : List[Any] = self.root**next_ncol
# First half of next step
lowerCAmelCase : Dict = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCamelCase_ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowerCAmelCase : int = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCamelCase_ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowerCAmelCase : Optional[Any] = new_dft
lowerCAmelCase : Union[str, Any] = next_ncol // 2
return dft[0]
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.__dft('''A''' )
lowerCAmelCase : Optional[int] = self.__dft('''B''' )
lowerCAmelCase : Any = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowerCAmelCase : str = 2
while next_ncol <= self.c_max_length:
lowerCAmelCase : Union[str, Any] = [[] for i in range(UpperCamelCase_ )]
lowerCAmelCase : Optional[Any] = self.root ** (next_ncol // 2)
lowerCAmelCase : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowerCAmelCase : Any = new_inverse_c
next_ncol *= 2
# Unpack
lowerCAmelCase : Optional[int] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : int ):
lowerCAmelCase : int = '''A = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
lowerCAmelCase : str = '''B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
lowerCAmelCase : int = '''A*B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
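# Usage sketch (the class above carries the obfuscated name `snake_case_`; in the
# original source it is `FFT`):
#   product = snake_case_([1, 2], [1, 3])
#   print(product)   # A*B = 1*x^0 + 5*x^1 + 6*x^2, i.e. (1 + 2x)(1 + 3x)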
| 60 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A , A ) -> Tuple:
lowerCAmelCase__ = WavaVecaForSequenceClassification.from_pretrained(A , config=A )
lowerCAmelCase__ = downstream_dict['''projector.weight''']
lowerCAmelCase__ = downstream_dict['''projector.bias''']
lowerCAmelCase__ = downstream_dict['''model.post_net.linear.weight''']
lowerCAmelCase__ = downstream_dict['''model.post_net.linear.bias''']
return model
def _snake_case ( A , A , A ) -> List[str]:
lowerCAmelCase__ = WavaVecaForAudioFrameClassification.from_pretrained(A , config=A )
lowerCAmelCase__ = downstream_dict['''model.linear.weight''']
lowerCAmelCase__ = downstream_dict['''model.linear.bias''']
return model
def _snake_case ( A , A , A ) -> str:
lowerCAmelCase__ = WavaVecaForXVector.from_pretrained(A , config=A )
lowerCAmelCase__ = downstream_dict['''connector.weight''']
lowerCAmelCase__ = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowerCAmelCase__ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
lowerCAmelCase__ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
lowerCAmelCase__ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
lowerCAmelCase__ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
lowerCAmelCase__ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
lowerCAmelCase__ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
lowerCAmelCase__ = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _snake_case ( A , A , A , A ) -> int:
lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )
lowerCAmelCase__ = checkpoint['''Downstream''']
lowerCAmelCase__ = WavaVecaConfig.from_pretrained(A )
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained(
A , return_attention_mask=A , do_normalize=A )
lowerCAmelCase__ = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
lowerCAmelCase__ = convert_classification(A , A , A )
elif arch.endswith('''ForAudioFrameClassification''' ):
lowerCAmelCase__ = convert_diarization(A , A , A )
elif arch.endswith('''ForXVector''' ):
lowerCAmelCase__ = convert_xvector(A , A , A )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
lowerCAmelCase__ = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(A )
hf_model.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__UpperCAmelCase = parser.parse_args()
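# Hedged CLI sketch for this converter (script name and paths are hypothetical):
#   python convert_s3prl_checkpoint.py --base_model_name facebook/wav2vec2-base \
#       --config_path config.json --checkpoint_path downstream.ckpt \
#       --model_dump_path ./converted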
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 228 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Dict = ""
lowercase__ : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowercase__ : str = None # compression type in fsspec. ex: "gzip"
lowercase__ : str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self , lowerCamelCase_ = "" , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ ) -> Any:
super().__init__(self , **lowerCamelCase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCAmelCase__ = fsspec.open(
lowerCamelCase_ , mode='''rb''' , protocol=lowerCamelCase_ , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCAmelCase__ = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCAmelCase__ = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCAmelCase__ = None
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , lowerCamelCase_ ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowerCamelCase_ ).lstrip('''/''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
if self.dir_cache is None:
lowerCAmelCase__ = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCAmelCase__ = {f['''name''']: f}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[Any]:
return self.file.open().read()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = "rb" , lowerCamelCase_=None , lowerCamelCase_=True , lowerCamelCase_=None , **lowerCamelCase_ , ) -> List[str]:
lowerCAmelCase__ = self._strip_protocol(lowerCamelCase_ )
if mode != "rb":
raise ValueError(F"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
return self.file.open()
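# Hedged usage sketch for the concrete subclasses below (bz2/gzip/lz4/xz/zstd);
# assumes each is registered with fsspec under its `protocol` name, as `datasets`
# does at import time:
#   import fsspec
#   fs = fsspec.filesystem("gzip", fo="corpus.txt.gz")
#   fs.ls("/")                  # single entry: the decompressed "corpus.txt"
#   text = fs.cat("corpus.txt")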
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = "bz2"
lowercase__ : str = "bz2"
lowercase__ : Optional[int] = ".bz2"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Any = "gzip"
lowercase__ : int = "gzip"
lowercase__ : int = ".gz"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Tuple = "lz4"
lowercase__ : Optional[Any] = "lz4"
lowercase__ : int = ".lz4"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[int] = "xz"
lowercase__ : str = "xz"
lowercase__ : List[Any] = ".xz"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Union[str, Any] = "zstd"
lowercase__ : Union[str, Any] = "zstd"
lowercase__ : Dict = ".zst"
def __init__( self , lowerCamelCase_ , lowerCamelCase_ = "rb" , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = DEFAULT_BLOCK_SIZE , **lowerCamelCase_ , ) -> List[Any]:
super().__init__(
fo=lowerCamelCase_ , mode=lowerCamelCase_ , target_protocol=lowerCamelCase_ , target_options=lowerCamelCase_ , block_size=lowerCamelCase_ , **lowerCamelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCAmelCase__ = self.file.__enter__
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = file_
def __enter__( self ) -> Tuple:
self._file.__enter__()
return self
def __exit__( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> str:
self._file.__exit__(*lowerCamelCase_ , **lowerCamelCase_ )
def __iter__( self ) -> Any:
return iter(self._file )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return next(self._file )
def __getattr__( self , lowerCamelCase_ ) -> str:
return getattr(self._file , lowerCamelCase_ )
def fixed_enter(*lowerCamelCase_ , **lowerCamelCase_ ):
return WrappedFile(_enter(*lowerCamelCase_ , **lowerCamelCase_ ) )
lowerCAmelCase__ = fixed_enter
| 228 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def __lowercase ( _a = 1_500_000 ):
snake_case_ : Union[str, Any] = defaultdict(__A )
snake_case_ : Dict = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __A , 2 ):
if gcd(__A , __A ) > 1:
continue
snake_case_ : Optional[int] = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__A , limit + 1 , __A ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
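# Example: m=2, n=1 yields the primitive triple (3, 4, 5) with perimeter
# 2*m*(m+n) = 12; the inner loop then counts every multiple 12, 24, 36, ...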
if __name__ == "__main__":
print(f'{solution() = }')
| 264 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class A :
def __init__(self : List[Any] , __UpperCAmelCase : list[tuple[float, float]] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
UpperCAmelCase__ = len(__UpperCAmelCase ) - 1
def lowercase_ (self : int , __UpperCAmelCase : float ) -> list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
UpperCAmelCase__ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , __UpperCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__UpperCAmelCase ) , 5 ) == 1
return output_values
def lowercase_ (self : Dict , __UpperCAmelCase : float ) -> tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
UpperCAmelCase__ = self.basis_function(__UpperCAmelCase )
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowercase_ (self : Optional[int] , __UpperCAmelCase : float = 0.01 ) -> Optional[int]:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
UpperCAmelCase__ = [] # x coordinates of points to plot
UpperCAmelCase__ = [] # y coordinates of points to plot
UpperCAmelCase__ = 0.0
while t <= 1:
UpperCAmelCase__ = self.bezier_curve_function(__UpperCAmelCase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
UpperCAmelCase__ = [i[0] for i in self.list_of_points]
UpperCAmelCase__ = [i[1] for i in self.list_of_points]
plt.plot(
__UpperCAmelCase , __UpperCAmelCase , color="blue" , label="Curve of Degree " + str(self.degree ) , )
plt.scatter(__UpperCAmelCase , __UpperCAmelCase , color="red" , label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 65 | 0 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def snake_case__ ( lowerCamelCase__ : List[str]=None , lowerCamelCase__ : int=None ) -> Any:
return field(default_factory=lambda: default , metadata=lowerCamelCase__ )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = field(
metadata={'help': 'The csv file to plot.'}, )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'}, )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'}, )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'Disable logarithmic scale when plotting'}, )
_lowerCAmelCase = field(
default=a__, metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
}, )
_lowerCAmelCase = field(
default=a__, metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'}, )
_lowerCAmelCase = list_field(
default=a__, metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def snake_case__ ( lowerCamelCase__ : Tuple ) -> Optional[int]:
try:
int(lowerCamelCase__ )
return True
except ValueError:
return False
def snake_case__ ( lowerCamelCase__ : List[Any] ) -> Union[str, Any]:
try:
float(lowerCamelCase__ )
return True
except ValueError:
return False
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : int , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ : Optional[int] = args
A_ : Optional[int] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
A_ : Dict = csv.DictReader(_lowerCamelCase )
for row in reader:
A_ : Union[str, Any] = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
A_ : Dict = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
A_ : Optional[Any] = float(row['''result'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ ,A_ : int = plt.subplots()
A_ : Optional[int] = '''Time usage''' if self.args.is_time else '''Memory usage'''
A_ : Optional[int] = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
A_ : int = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
A_ : List[str] = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
A_ : Union[str, Any] = self.result_dict[model_name]['''result''']
((A_) ,(A_)) : List[Any] = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
A_ : Union[str, Any] = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
A_ : Optional[Any] = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_lowerCamelCase , )
else:
A_ : List[str] = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((A_) ,(A_)) : Optional[int] = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
A_ : Union[str, Any] = np.asarray(_lowerCamelCase , _lowerCamelCase )[: len(_lowerCamelCase )]
plt.scatter(
_lowerCamelCase , _lowerCamelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
plt.plot(_lowerCamelCase , _lowerCamelCase , '''--''' )
title_str += f' {label_model_name} vs.'
A_ : List[Any] = title_str[:-4]
A_ : Optional[Any] = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(_lowerCamelCase )
plt.xlabel(_lowerCamelCase )
plt.ylabel(_lowerCamelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def snake_case__ ( ) -> Optional[int]:
A_ : str = HfArgumentParser(lowerCamelCase__ )
A_ : Tuple = parser.parse_args_into_dataclasses()[0]
A_ : Any = Plot(args=lowerCamelCase__ )
plot.plot()
if __name__ == "__main__":
main()
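# Example invocation (csv produced by the transformers benchmarking scripts;
# filenames are hypothetical, flags follow the PlotArguments fields above):
#   python plot_csv_file.py --csv_file benchmark_results.csv --figure_png_file memory.png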
| 4 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
snake_case__ = sys.version_info >= (3, 10)
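# e.g. on 3.10+, `foo: int | None = None` is equivalent to `foo: Optional[int] = None`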
def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]:
return field(default_factory=lambda: default , metadata=lowerCamelCase__ )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 4_2
_lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = None
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'titi'
_lowerCAmelCase = 'toto'
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'titi'
_lowerCAmelCase = 'toto'
_lowerCAmelCase = 4_2
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = BasicEnum(self.foo )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
_lowerCAmelCase = None
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[] )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[1, 2, 3] )
_lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
_lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = field()
_lowerCAmelCase = field()
_lowerCAmelCase = field()
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Tuple = BasicEnum(self.required_enum )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = field()
_lowerCAmelCase = None
_lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )
_lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = None
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
_lowerCAmelCase = None
_lowerCAmelCase = list_field(default=[] )
_lowerCAmelCase = list_field(default=[] )
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
A_ : Union[str, Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase )
self.assertFalse(example.flag )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : int = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Any = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
A_ : Dict = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : Any = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : str = HfArgumentParser(_lowerCamelCase )
A_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : str = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
A_ : int = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _a ( self : Optional[int] ):
"""simple docstring"""
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = "toto"
A_ : List[str] = HfArgumentParser(_lowerCamelCase )
A_ : Tuple = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
A_ : int = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = parser.parse_args([] )
self.assertEqual(
_lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
A_ : Tuple = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : int = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) )
A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
A_ : Dict = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
A_ : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0]
A_ : str = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Any = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
A_ : Optional[Any] = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : Tuple = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : int = os.path.join(_lowerCamelCase , '''temp_yaml''' )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
A_ : int = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = HfArgumentParser(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
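# A minimal, self-contained sketch (plain argparse, not the HfArgumentParser internals
# exercised above) of the "--baz / --no_baz" convention the tests rely on: the store_false
# counterpart shares the same `dest`, so either switch toggles one attribute.
import argparse
_sketch_parser = argparse.ArgumentParser()
_sketch_parser.add_argument("--baz", action="store_true", default=True)
_sketch_parser.add_argument("--no_baz", action="store_false", dest="baz")
assert _sketch_parser.parse_args([]).baz is True
assert _sketch_parser.parse_args(["--no_baz"]).baz is False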
| 4 | 1 |
import unittest
from typing import Any, Dict, List, Optional, Tuple, Union
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : int = BarthezTokenizer
_A : int = BarthezTokenizerFast
_A : Tuple = True
_A : str = True
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase : Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
__lowercase : Any = tokenizer
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : str = "<pad>"
__lowercase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(a_ ) , 101122 )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__lowercase : Dict = [0, 57, 3018, 70307, 91, 2]
__lowercase : Any = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="""pt""" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowercase : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase : Any = self.get_tokenizer()
__lowercase : Optional[int] = self.get_rust_tokenizer()
__lowercase : List[str] = "I was born in 92000, and this is falsé."
__lowercase : Tuple = tokenizer.tokenize(a_ )
__lowercase : List[str] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
__lowercase : str = tokenizer.encode(a_ , add_special_tokens=a_ )
__lowercase : Union[str, Any] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
__lowercase : Union[str, Any] = self.get_rust_tokenizer()
__lowercase : Optional[int] = tokenizer.encode(a_ )
__lowercase : Union[str, Any] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase : List[Any] = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
__lowercase : Dict = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=a_ , ) | 233 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowercase__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class __lowerCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
a_ : int = 1_0000
a_ : Optional[List[str]] = None
a_ : Optional[datasets.Features] = None
class __lowerCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
a_ : Dict = ParquetConfig
def lowerCamelCase ( self : Optional[Any] ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase ( self : Any , a_ : Optional[Any] ):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowerCAmelCase_ : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
lowerCAmelCase_ : str = data_files
if isinstance(a_ , a_ ):
lowerCAmelCase_ : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase_ : str = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
lowerCAmelCase_ : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
lowerCAmelCase_ : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase_ : List[str] = [dl_manager.iter_files(a_ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a_ ):
with open(a_ , "rb" ) as f:
lowerCAmelCase_ : Dict = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) )
break
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase ( self : int , a_ : pa.Table ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase_ : Tuple = table_cast(a_ , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase ( self : Dict , a_ : Dict ):
lowerCAmelCase_ : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
with open(a_ , "rb" ) as f:
lowerCAmelCase_ : Any = pq.ParquetFile(a_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowerCAmelCase_ : List[str] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'''{file_idx}_{batch_idx}''', self._cast_table(a_ )
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(a_ )}: {e}''' )
raise
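# A minimal, standalone sketch of the batched read used in `_generate_tables` above:
# `pq.ParquetFile.iter_batches` yields bounded RecordBatches (so peak memory stays flat
# for large files), and each batch is rewrapped as a single-batch Table. The helper name
# and defaults here are assumptions, not part of the builder.
import pyarrow as pa
import pyarrow.parquet as pq

def iter_parquet_tables(path, batch_size=10_000, columns=None):
    parquet_file = pq.ParquetFile(path)
    for record_batch in parquet_file.iter_batches(batch_size=batch_size, columns=columns):
        yield pa.Table.from_batches([record_batch])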
| 241 | 0 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
lowercase : str = logging.get_logger(__name__)
lowercase : Optional[int] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowercase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A__ :
"""simple docstring"""
__A : str = field(
default=__UpperCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__UpperCAmelCase )} )
__A : str = field(
default=__UpperCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
__A : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A : int = field(
default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
__A : int = field(
default=6_4 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
__A : int = field(
default=3_0 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
__A : bool = field(
default=__UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
__A : bool = field(
default=__UpperCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
__A : float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
__A : int = field(
default=2_0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
__A : int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
__A : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class A__ ( Enum ):
"""simple docstring"""
__A : Dict = '''train'''
__A : Tuple = '''dev'''
class A__ ( Dataset ):
"""simple docstring"""
__A : SquadDataTrainingArguments
__A : List[SquadFeatures]
__A : Split
__A : bool
def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ):
'''simple docstring'''
a__ : List[str] = args
a__ : Any = is_language_sensitive
a__ : Optional[Any] = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(mode , str):
try:
a__ : Optional[int] = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
a__ : List[Any] = mode
# Load data features from cache or dataset file
a__ : List[str] = 'v2' if args.version_2_with_negative else 'v1'
a__ : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a__ : List[Any] = cached_features_file + '.lock'
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not args.overwrite_cache:
a__ : List[Any] = time.time()
a__ : Optional[Any] = torch.load(cached_features_file)
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
a__ : List[str] = self.old_features['features']
a__ : int = self.old_features.get('dataset' , lowercase)
a__ : Dict = self.old_features.get('examples' , lowercase)
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start)
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
' future run')
else:
if mode == Split.dev:
a__ : Dict = self.processor.get_dev_examples(args.data_dir)
else:
a__ : int = self.processor.get_train_examples(args.data_dir)
a__ , a__ : Any = squad_convert_examples_to_features(
examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
a__ : Tuple = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]')
def __len__( self) -> Tuple:
'''simple docstring'''
return len(self.features)
def __getitem__( self , lowercase) -> Dict[str, torch.Tensor]:
'''simple docstring'''
a__ : Union[str, Any] = self.features[i]
a__ : Optional[int] = torch.tensor(feature.input_ids , dtype=torch.long)
a__ : Tuple = torch.tensor(feature.attention_mask , dtype=torch.long)
a__ : Optional[Any] = torch.tensor(feature.token_type_ids , dtype=torch.long)
a__ : str = torch.tensor(feature.cls_index , dtype=torch.long)
a__ : Optional[Any] = torch.tensor(feature.p_mask , dtype=torch.float)
a__ : Tuple = torch.tensor(feature.is_impossible , dtype=torch.float)
a__ : str = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask})
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible})
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.intaa) * self.args.lang_id)})
if self.mode == Split.train:
a__ : Dict = torch.tensor(feature.start_position , dtype=torch.long)
a__ : str = torch.tensor(feature.end_position , dtype=torch.long)
inputs.update({'start_positions': start_positions, 'end_positions': end_positions})
return inputs
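# A minimal sketch of the lock-guarded caching pattern used in __init__ above: the first
# process to acquire the FileLock builds and saves the features, and every later process
# finds the file on disk and loads it. `build_features` and the cache path are placeholders.
import os
import torch
from filelock import FileLock

def load_or_build(cached_features_file, build_features):
    with FileLock(cached_features_file + ".lock"):
        if os.path.exists(cached_features_file):
            return torch.load(cached_features_file)
        features = build_features()
        torch.save(features, cached_features_file)
        return features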
| 225 |
import glob
import os
import random
from string import ascii_lowercase, digits
from typing import Any, List, Optional
import cva
lowercase : Optional[Any] = """"""
lowercase : int = """"""
lowercase : List[Any] = """"""
lowercase : Optional[int] = 1 # (0 is vertical, 1 is horizontal)
def A_ ( ) -> None:
a__ , a__ : str = get_dataset(A__ , A__ )
print('Processing...' )
a__ , a__ , a__ : Tuple = update_image_and_anno(A__ , A__ , A__ )
for index, image in enumerate(A__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
a__ : int = random_chars(32 )
a__ : Optional[Any] = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
a__ : Optional[int] = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
cva.imwrite(F'/{file_root}.jpg' , A__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'Success {index+1}/{len(A__ )} with {file_name}' )
a__ : List[str] = []
for anno in new_annos[index]:
a__ : Union[str, Any] = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
annos_list.append(A__ )
with open(F'/{file_root}.txt' , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def A_ ( label_dir , img_dir ) -> tuple[list, list]:
a__ : int = []
a__ : int = []
for label_file in glob.glob(os.path.join(label_dir , '*.txt' ) ):
a__ : Optional[Any] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(label_file ) as in_file:
a__ : Tuple = in_file.readlines()
a__ : Dict = os.path.join(img_dir , F'{label_name}.jpg' )
a__ : int = []
for obj_list in obj_lists:
a__ : Union[str, Any] = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(A__ )
labels.append(A__ )
return img_paths, labels
def A_ ( img_list , anno_list , flip_type = 1 ) -> tuple[list, list, list]:
a__ : Optional[int] = []
a__ : Any = []
a__ : Dict = []
for idx in range(len(img_list ) ):
a__ : Optional[int] = []
a__ : Optional[Any] = img_list[idx]
path_list.append(A__ )
a__ : Union[str, Any] = anno_list[idx]
a__ : List[str] = cva.imread(A__ )
if flip_type == 1:
a__ : List[str] = cva.flip(A__ , A__ )
for bbox in img_annos:
a__ : Optional[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
a__ : Optional[Any] = cva.flip(A__ , A__ )
for bbox in img_annos:
a__ : Optional[int] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(A__ )
new_imgs_list.append(A__ )
return new_imgs_list, new_annos_lists, path_list
def A_ ( number_char = 32 ) -> str:
assert number_char > 1, "The number of characters should be greater than 1"
letter_code = ascii_lowercase + digits
return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 225 | 1 |
import argparse
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
__A : List[Any] = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
__A : int = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
raise Exception("requires fairseq >= 0.9.0")
logging.set_verbosity_info()
__A : str = logging.get_logger(__name__)
__A : Dict = ''' Hello world! cécé herlolip'''
__A : Tuple = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
'''simple docstring'''
UpperCAmelCase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def __SCREAMING_SNAKE_CASE ( dct , old , new ) -> Any:
'''simple docstring'''
val = dct.pop(old )
dct[new] = val
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval()
hub_interface.model.load_state_dict(sd['''model'''] )
return hub_interface
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = emb.weight.shape
UpperCAmelCase = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
UpperCAmelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ) -> Tuple:
'''simple docstring'''
if not os.path.exists(checkpoint_path ):
UpperCAmelCase = torch.hub.load('''pytorch/fairseq''' , checkpoint_path ).eval()
else:
UpperCAmelCase = load_xsum_checkpoint(checkpoint_path )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
UpperCAmelCase = checkpoint_path.replace('''.''' , '''-''' )
UpperCAmelCase = BartConfig.from_pretrained(hf_checkpoint_name )
UpperCAmelCase = bart.encode(__lowercase ).unsqueeze(0 )
UpperCAmelCase = BartTokenizer.from_pretrained(__lowercase ).encode(__lowercase , return_tensors='''pt''' ).unsqueeze(0 )
if not torch.eq(__lowercase , __lowercase ).all():
raise ValueError(
F"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""" )
if checkpoint_path == "bart.large.mnli":
UpperCAmelCase = bart.state_dict()
remove_ignore_keys_(__lowercase )
UpperCAmelCase = state_dict['''model.decoder.embed_tokens.weight''']
for src, dest in mnli_rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
UpperCAmelCase = BartForSequenceClassification(__lowercase ).eval()
model.load_state_dict(__lowercase )
UpperCAmelCase = bart.predict('''mnli''' , __lowercase , return_logits=__lowercase )
UpperCAmelCase = model(__lowercase )[0] # logits
else: # no classification heads to worry about
UpperCAmelCase = bart.model.state_dict()
remove_ignore_keys_(__lowercase )
UpperCAmelCase = state_dict['''decoder.embed_tokens.weight''']
UpperCAmelCase = bart.extract_features(__lowercase )
if hf_checkpoint_name == "facebook/bart-large":
UpperCAmelCase = BartModel(__lowercase ).eval()
model.load_state_dict(__lowercase )
UpperCAmelCase = model(__lowercase ).model[0]
else:
UpperCAmelCase = BartForConditionalGeneration(__lowercase ).eval() # an existing summarization ckpt
model.model.load_state_dict(__lowercase )
if hasattr(__lowercase , '''lm_head''' ):
UpperCAmelCase = make_linear_from_emb(model.model.shared )
UpperCAmelCase = model.model(__lowercase )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
)
__A : Union[str, Any] = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
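# A minimal, standalone sketch of the state-dict surgery performed above: keys are dropped
# or renamed in place before loading into the target architecture. The keys below are
# illustrative placeholders, not real fairseq/transformers names.
def rename_state_dict_keys(state_dict, ignore_keys, rename_pairs):
    for key in ignore_keys:
        state_dict.pop(key, None)
    for src, dest in rename_pairs:
        if src in state_dict:
            state_dict[dest] = state_dict.pop(src)
    return state_dict

assert rename_state_dict_keys({"a": 1, "v": 2}, ["v"], [("a", "b")]) == {"b": 1}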
| 273 |
'''simple docstring'''
import inspect
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def UpperCAmelCase_ ( __lowercase : str ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = image.size
_UpperCAmelCase , _UpperCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_UpperCAmelCase = np.array(__lowercase ).astype(np.floataa ) / 255.0
_UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase = torch.from_numpy(__lowercase )
return 2.0 * image - 1.0
class A_ ( DiffusionPipeline ):
def __init__( self : Optional[Any] , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self : Any , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 1_0_0 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
if isinstance(image , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(image , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
if isinstance(image , PIL.Image.Image ):
_UpperCAmelCase = preprocess(image )
_UpperCAmelCase , _UpperCAmelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
_UpperCAmelCase = next(self.unet.parameters() ).dtype
_UpperCAmelCase = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
_UpperCAmelCase = image.to(device=self.device , dtype=latents_dtype )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(num_inference_steps , device=self.device )
_UpperCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
extra_kwargs["eta"] = eta
for t in self.progress_bar(timesteps_tensor ):
# concat latents and low resolution image in the channel dimension.
_UpperCAmelCase = torch.cat([latents, image] , dim=1 )
_UpperCAmelCase = self.scheduler.scale_model_input(latents_input , t )
# predict the noise residual
_UpperCAmelCase = self.unet(latents_input , t ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
# decode the image latents with the VQVAE
_UpperCAmelCase = self.vqvae.decode(latents ).sample
_UpperCAmelCase = torch.clamp(image , -1.0 , 1.0 )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image )
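# A standalone sketch of what preprocess() above does numerically: the size is snapped
# down to a multiple of 32 (the UNet's total downsampling factor), and pixel values are
# mapped from [0, 255] to [-1, 1] via 2 * x - 1.
import numpy as np

def normalize_pixels(arr_uint8):
    scaled = arr_uint8.astype(np.float32) / 255.0
    return 2.0 * scaled - 1.0

assert normalize_pixels(np.array([0, 255], dtype=np.uint8)).tolist() == [-1.0, 1.0]
assert 513 - 513 % 32 == 512  # sizes are snapped down to the nearest multiple of 32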
| 22 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( input_str : str , use_pascal : bool = False ):
if not isinstance(input_str , str ):
msg = F'''Expected string as input, found {type(input_str )}'''
raise ValueError(msg )
if not isinstance(use_pascal , bool ):
msg = F'''Expected boolean as use_pascal parameter, found {type(use_pascal )}'''
raise ValueError(msg )
words = input_str.split("_" )
start_index = 0 if use_pascal else 1
words_to_capitalize = words[start_index:]
capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
initial_word = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
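# Usage sketch for the converter above (argument names follow the function's own
# parameters): with use_pascal=False the first word keeps its case, and with
# use_pascal=True it is capitalized as well.
assert SCREAMING_SNAKE_CASE("some_random_string") == "someRandomString"
assert SCREAMING_SNAKE_CASE("some_random_string", use_pascal=True) == "SomeRandomString"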
| 351 |
import os
import unittest
from typing import Any, List, Optional, Tuple
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
a_ = PhobertTokenizer
a_ = False
def _lowercase ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Optional[int] = ["T@@", "i", "I", "R@@", "r", "e@@"]
snake_case__ : int = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Dict = ["#version: 0.2", "l à</w>"]
snake_case__ : Optional[Any] = {"unk_token": "<unk>"}
snake_case__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def _lowercase ( self : List[str] , **__A : Any ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **__A )
def _lowercase ( self : Tuple , __A : List[Any] ):
snake_case__ : str = "Tôi là VinAI Research"
snake_case__ : int = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def _lowercase ( self : Optional[int] ):
snake_case__ : int = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = "Tôi là VinAI Research"
snake_case__ : List[Any] = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
snake_case__ : int = tokenizer.tokenize(__A )
print(__A )
self.assertListEqual(__A , __A )
snake_case__ : Any = tokens + [tokenizer.unk_token]
snake_case__ : Any = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
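# A minimal sketch of the subword-nmt "@@" convention behind the vocab above: "@@" marks
# a piece that continues into the next token, so detokenization simply joins and drops "@@ ".
def detokenize_bpe(tokens):
    return " ".join(tokens).replace("@@ ", "")

assert detokenize_bpe(["T@@", "i"]) == "Ti"
assert detokenize_bpe(["R@@", "e@@", "s"]) == "Res"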
| 286 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCAmelCase_ ( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self : Dict , *,
clip_extra_context_tokens : int = 4 , clip_embeddings_dim : int = 7_68 , time_embed_dim : int , cross_attention_dim : Optional[int] , ):
'''simple docstring'''
super().__init__()
A: Optional[int] = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
# parameters for additional clip time embeddings
A: List[Any] = nn.Linear(clip_embeddings_dim , time_embed_dim )
A: List[str] = nn.Linear(clip_embeddings_dim , time_embed_dim )
# parameters for encoder hidden states
A: Any = clip_extra_context_tokens
A: Optional[int] = nn.Linear(
clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
A: str = nn.Linear(clip_embeddings_dim , cross_attention_dim )
A: Tuple = nn.LayerNorm(cross_attention_dim )
def _snake_case ( self : Optional[Any] , *, image_embeddings : Optional[int] , prompt_embeds : Any , text_encoder_hidden_states : Optional[int] , do_classifier_free_guidance : List[Any] ) -> List[Any]:
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
A: List[Any] = image_embeddings.shape[0]
A: Optional[Any] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
A: Optional[Any] = classifier_free_guidance_embeddings.expand(
image_embeddings_batch_size , -1 )
A: Any = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
A: Dict = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
A: Any = self.embedding_proj(prompt_embeds )
A: Optional[int] = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
A: int = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
A: List[str] = self.clip_extra_context_tokens_proj(image_embeddings )
A: Any = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
A: Union[str, Any] = clip_extra_context_tokens.permute(0 , 2 , 1 )
A: Tuple = self.encoder_hidden_states_proj(text_encoder_hidden_states )
A: Optional[int] = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
A: Optional[int] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
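# A shape-only sketch of the extra-token projection above: the (batch, tokens * dim)
# projection output is reshaped to (batch, dim, tokens), permuted to (batch, tokens, dim),
# then concatenated with the (batch, seq_len, dim) text states along the sequence axis.
import torch

_batch, _tokens, _dim, _seq_len = 2, 4, 8, 5
_extra = torch.zeros(_batch, _tokens * _dim).reshape(_batch, -1, _tokens).permute(0, 2, 1)
assert _extra.shape == (_batch, _tokens, _dim)
assert torch.cat([_extra, torch.zeros(_batch, _seq_len, _dim)], dim=1).shape == (_batch, _tokens + _seq_len, _dim)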
| 319 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE( nums ) -> bool:
if len(nums ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
copy_nums = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
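# Usage sketch: the check above is the generalized triangle inequality, i.e. the longest
# side must be strictly shorter than the sum of all the others.
assert SCREAMING_SNAKE_CASE([3, 4, 5]) is True
assert SCREAMING_SNAKE_CASE([1, 2, 10]) is False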
| 319 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Union[str, Any] = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
_lowerCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
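# A minimal sketch of the lazy-import idea behind `_LazyModule` (via PEP 562 module-level
# __getattr__, not the actual transformers implementation): a name is resolved to a real
# import only on first attribute access, so importing the package itself stays cheap.
# The mapping below is an assumed illustration.
import importlib

_lazy_attrs = {"RobertaConfig": "transformers"}

def __getattr__(name):
    if name in _lazy_attrs:
        return getattr(importlib.import_module(_lazy_attrs[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")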
| 249 |
'''simple docstring'''
def __lowerCamelCase ( density , bulk_modulus ) -> float:
"""simple docstring"""
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
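# Worked example (the water figures are assumed textbook values, not from this file):
# a bulk modulus of ~2.15e9 Pa over a density of ~1000 kg/m^3 gives sqrt(2.15e6) of
# roughly 1466 m/s.
assert round(__lowerCamelCase(1_000, 2.15e9)) == 1466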
| 249 | 1 |
import shutil
import tempfile
import unittest
from typing import Any, Dict, List, Optional, Tuple
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_lowerCamelCase : Optional[Any] = 2_5_6_0_4_7
_lowerCamelCase : List[Any] = 2_5_6_1_4_5
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
UpperCamelCase = NllbTokenizer
UpperCamelCase = NllbTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = {}
def __magic_name__ ( self : Any ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Optional[Any] = NllbTokenizer(__A, keep_accents=__A )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Dict = NllbTokenizer(__A, keep_accents=__A )
UpperCAmelCase : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__A, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ), [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]], )
UpperCAmelCase : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__A, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
], )
UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
], )
UpperCAmelCase : Any = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
], )
def __magic_name__ ( self : str ):
UpperCAmelCase : List[str] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(__A, **__A )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(__A, **__A )
UpperCAmelCase : str = tempfile.mkdtemp()
UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(__A )
UpperCAmelCase : Dict = tokenizer_p.save_pretrained(__A )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
UpperCAmelCase : List[Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__A, __A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(__A )
UpperCAmelCase : int = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A, __A ) )
shutil.rmtree(__A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : int = tempfile.mkdtemp()
UpperCAmelCase : List[Any] = tokenizer_r.save_pretrained(__A, legacy_format=__A )
UpperCAmelCase : Union[str, Any] = tokenizer_p.save_pretrained(__A )
# Checks it saves with the same files
self.assertSequenceEqual(__A, __A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(__A )
UpperCAmelCase : List[Any] = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A, __A ) )
shutil.rmtree(__A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase : Tuple = tokenizer_r.save_pretrained(__A, legacy_format=__A )
UpperCAmelCase : str = tokenizer_p.save_pretrained(__A )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : Any = tokenizer_r.from_pretrained(__A )
UpperCAmelCase : Dict = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A, __A ) )
shutil.rmtree(__A )
@require_torch
def __magic_name__ ( self : Optional[int] ):
if not self.test_seqaseq:
return
UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Longer text that will definitely require truncation.
UpperCAmelCase : Dict = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
UpperCAmelCase : List[str] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
UpperCAmelCase : int = tokenizer.prepare_seqaseq_batch(
src_texts=__A, tgt_texts=__A, max_length=3, max_target_length=1_0, return_tensors='''pt''', src_lang='''eng_Latn''', tgt_lang='''ron_Latn''', )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.labels.shape[1], 1_0 )
# max_target_length will default to max_length if not specified
UpperCAmelCase : Tuple = tokenizer.prepare_seqaseq_batch(
__A, tgt_texts=__A, max_length=3, return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.labels.shape[1], 3 )
UpperCAmelCase : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
src_texts=__A, max_length=3, max_target_length=1_0, return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1], 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3 )
self.assertNotIn('''decoder_input_ids''', __A )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def __magic_name__ ( self : Dict ):
pass
def __magic_name__ ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Any = [AddedToken('''<special>''', lstrip=__A )]
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__A, additional_special_tokens=__A, **__A )
UpperCAmelCase : Dict = tokenizer_r.encode('''Hey this is a <special> token''' )
UpperCAmelCase : Any = tokenizer_r.encode('''<special>''', add_special_tokens=__A )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
__A, additional_special_tokens=__A, **__A, )
UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained(
__A, additional_special_tokens=__A, **__A )
UpperCAmelCase : Union[str, Any] = tokenizer_p.encode('''Hey this is a <special> token''' )
UpperCAmelCase : Union[str, Any] = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(__A, __A )
self.assertEqual(__A, __A )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = """facebook/nllb-200-distilled-600M"""
UpperCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCamelCase = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def __magic_name__ ( cls : Optional[int] ):
UpperCAmelCase : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name, src_lang='''eng_Latn''', tgt_lang='''ron_Latn''' )
UpperCAmelCase : Tuple = 1
return cls
def __magic_name__ ( self : List[Any] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''], 2_5_6_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''], 2_5_6_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''], 2_5_6_0_5_7 )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, __A )
def __magic_name__ ( self : List[str] ):
self.assertIn(__A, self.tokenizer.all_special_ids )
# fmt: off
UpperCAmelCase : Any = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7]
# fmt: on
UpperCAmelCase : Union[str, Any] = self.tokenizer.decode(__A, skip_special_tokens=__A )
UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=__A )
self.assertEqual(__A, __A )
self.assertNotIn(self.tokenizer.eos_token, __A )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Optional[int] = ['''this is gunna be a long sentence ''' * 2_0]
assert isinstance(src_text[0], __A )
UpperCAmelCase : Optional[Any] = 1_0
UpperCAmelCase : int = self.tokenizer(__A, max_length=__A, truncation=__A ).input_ids[0]
self.assertEqual(ids[-1], 2 )
self.assertEqual(ids[0], __A )
self.assertEqual(len(__A ), __A )
def __magic_name__ ( self : List[str] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [2_5_6_2_0_3, 3] )
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Dict = tempfile.mkdtemp()
UpperCAmelCase : str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__A )
UpperCAmelCase : Any = NllbTokenizer.from_pretrained(__A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, __A )
@require_torch
def test_enro_tokenizer_prepare_batch( self ):
batch = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
batch['''decoder_input_ids'''] = shift_tokens_right(
batch['''labels'''], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(batch, BatchEncoding )
self.assertEqual((2, 1_5), batch.input_ids.shape )
self.assertEqual((2, 1_5), batch.attention_mask.shape )
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, result )
self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
def test_seq2seq_max_length( self ):
batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='''pt''' )
targets = self.tokenizer(
text_target=self.tgt_text, padding=True, truncation=True, max_length=1_0, return_tensors='''pt''' )
labels = targets['''input_ids''']
batch['''decoder_input_ids'''] = shift_tokens_right(
labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang], )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 1_0 )
@require_torch
def test_tokenizer_translation( self ):
inputs = self.tokenizer._build_translation_inputs(
'''A test''', return_tensors='''pt''', src_lang='''eng_Latn''', tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(inputs ), {
# A, test, EOS, en_XX
'''input_ids''': [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_6_0_5_7,
}, )
@require_torch
def test_legacy_behaviour( self ):
self.tokenizer.legacy_behaviour = True
inputs = self.tokenizer(
'''UN Chief says there is no military solution in Syria''', src_lang='''eng_Latn''', tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids, [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] )
self.tokenizer.legacy_behaviour = False
inputs = self.tokenizer(
'''UN Chief says there is no military solution in Syria''', src_lang='''eng_Latn''', tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids, [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
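# A minimal, self-contained sketch of the right-shift used to build decoder inputs in the
# tests above. This is an illustrative re-implementation, not the transformers function
# itself; decoder_start_token_id stands in for the target language code (e.g. ron_Latn).
import torch

def shift_tokens_right_sketch(labels, pad_token_id, decoder_start_token_id):
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()              # move every label one slot right
    shifted[:, 0] = decoder_start_token_id               # decoder starts from the lang code
    shifted.masked_fill_(shifted == -100, pad_token_id)  # ignored positions become padding
    return shifted

# e.g. labels [[10, 11, 2]] with start token 9 becomes [[9, 10, 11]]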
| 336 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = LayoutLMTokenizer
rust_tokenizer_class = LayoutLMTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
def setUp( self ):
super().setUp()
vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def get_tokenizer( self, **__A : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **__A )
def get_input_output_texts( self, __A : int ):
input_text = '''UNwant\u00E9d,running'''
output_text = '''unwanted, running'''
return input_text, output_text
def test_full_tokenizer( self ):
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ), [7, 4, 5, 1_0, 8, 9] )
def test_special_tokens_as_you_expect( self ):
pass
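# A rough sketch of the greedy longest-match-first WordPiece step that test_full_tokenizer
# exercises above, using the toy vocab from setUp. Simplified on purpose: no lowercasing
# and no max-characters-per-word cap.
def wordpiece_sketch(word, vocab):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else '##' + word[start:end]  # continuation prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return ['[UNK]']  # no piece matches at this position: whole word is unknown
        tokens.append(cur)
        start = end
    return tokens

# wordpiece_sketch('unwanted', {'un', '##want', '##ed'}) -> ['un', '##want', '##ed']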
| 336 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor( ChineseCLIPImageProcessor ):
def __init__( self , *args , **kwargs):
"""simple docstring"""
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
super().__init__(*args , **kwargs)
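# The class above is the standard deprecation-alias pattern: keep the old name importable,
# warn on construction, and inherit everything from the replacement. A generic sketch
# (NewProcessor/OldProcessor are invented names, not transformers classes):
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn('OldProcessor is deprecated, use NewProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs)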
| 353 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open( *args , **kwargs):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def get_test_pipeline( self , model , tokenizer , processor ):
"""simple docstring"""
vqa_pipeline = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
examples = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def run_pipeline_test( self , vqa_pipeline , examples ):
"""simple docstring"""
outputs = vqa_pipeline(examples , top_k=1)
self.assertEqual(
outputs , [
[{'''score''': ANY(float), '''answer''': ANY(str)}],
[{'''score''': ANY(float), '''answer''': ANY(str)}],
] , )
@require_torch
def test_small_model_pt( self ):
"""simple docstring"""
vqa_pipeline = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
question = '''How many cats are there?'''
outputs = vqa_pipeline(image=image , question='''How many cats are there?''' , top_k=2)
self.assertEqual(
outputs , [{'''score''': ANY(float), '''answer''': ANY(str)}, {'''score''': ANY(float), '''answer''': ANY(str)}])
outputs = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
outputs , [{'''score''': ANY(float), '''answer''': ANY(str)}, {'''score''': ANY(float), '''answer''': ANY(str)}])
@slow
@require_torch
def test_large_model_pt( self ):
"""simple docstring"""
vqa_pipeline = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''')
image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
question = '''How many cats are there?'''
outputs = vqa_pipeline(image=image , question=question , top_k=2)
self.assertEqual(
nested_simplify(outputs , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
outputs = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
nested_simplify(outputs , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
outputs = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2)
self.assertEqual(
nested_simplify(outputs , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
def test_small_model_tf( self ):
"""simple docstring"""
pass
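# A hedged usage sketch of the pipeline exercised above. Model name and image path are
# taken from the test; the scores shown are illustrative, not guaranteed outputs.
from transformers import pipeline

vqa = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
preds = vqa(
    image='./tests/fixtures/tests_samples/COCO/000000039769.png',
    question='How many cats are there?',
    top_k=2,
)
# preds is a list of {'score': float, 'answer': str} dicts sorted by score,
# e.g. [{'score': 0.88, 'answer': '2'}, {'score': 0.30, 'answer': '1'}]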
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_bert"] = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_bert"] = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_bert"] = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
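# The _LazyModule plumbing above defers heavy imports until an attribute is first touched.
# A stripped-down sketch of the same idea using PEP 562 module-level __getattr__
# (illustrative only, not the transformers implementation):
import importlib

_LAZY_ATTRS = {
    "BertModel": ".modeling_bert",        # attribute name -> submodule that defines it
    "BertTokenizer": ".tokenization_bert",
}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)      # import happens only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")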
| 23 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaVaConfig( PretrainedConfig ):
'''simple docstring'''
model_type = """deberta-v2"""
def __init__( self , vocab_size=12_81_00 , hidden_size=15_36 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=61_44 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=0 , initializer_range=0.0_2 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
'''simple docstring'''
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.relative_attention = relative_attention
self.max_relative_positions = max_relative_positions
self.pad_token_id = pad_token_id
self.position_biased_input = position_biased_input
# Backwards compatibility
if type(pos_att_type ) == str:
pos_att_type = [x.strip() for x in pos_att_type.lower().split('''|''' )]
self.pos_att_type = pos_att_type
self.vocab_size = vocab_size
self.layer_norm_eps = layer_norm_eps
self.pooler_hidden_size = kwargs.get('''pooler_hidden_size''' , hidden_size )
self.pooler_dropout = pooler_dropout
self.pooler_hidden_act = pooler_hidden_act
class DebertaVaOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
dynamic_axis = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def default_onnx_opset( self ) -> int:
'''simple docstring'''
return 12
def generate_dummy_inputs( self , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , num_choices : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , tokenizer : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
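# A small sketch of the backwards-compatibility branch in __init__ above: older configs
# stored pos_att_type as a pipe-separated string, newer ones as a list.
def normalize_pos_att_type(pos_att_type):
    if isinstance(pos_att_type, str):
        return [x.strip() for x in pos_att_type.lower().split('|')]
    return pos_att_type

assert normalize_pos_att_type('C2P|P2C') == ['c2p', 'p2c']
assert normalize_pos_att_type(['c2p']) == ['c2p']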
| 226 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback( TrainerCallback):
"""A callback that registers the events that go through."""
def __init__( self ):
self.events = []
def on_init_end( self , args , state , control , **kwargs ):
self.events.append("""on_init_end""" )
def on_train_begin( self , args , state , control , **kwargs ):
self.events.append("""on_train_begin""" )
def on_train_end( self , args , state , control , **kwargs ):
self.events.append("""on_train_end""" )
def on_epoch_begin( self , args , state , control , **kwargs ):
self.events.append("""on_epoch_begin""" )
def on_epoch_end( self , args , state , control , **kwargs ):
self.events.append("""on_epoch_end""" )
def on_step_begin( self , args , state , control , **kwargs ):
self.events.append("""on_step_begin""" )
def on_step_end( self , args , state , control , **kwargs ):
self.events.append("""on_step_end""" )
def on_evaluate( self , args , state , control , **kwargs ):
self.events.append("""on_evaluate""" )
def on_predict( self , args , state , control , **kwargs ):
self.events.append("""on_predict""" )
def on_save( self , args , state , control , **kwargs ):
self.events.append("""on_save""" )
def on_log( self , args , state , control , **kwargs ):
self.events.append("""on_log""" )
def on_prediction_step( self , args , state , control , **kwargs ):
self.events.append("""on_prediction_step""" )
@require_torch
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def setUp( self ):
self.output_dir = tempfile.mkdtemp()
def tearDown( self ):
shutil.rmtree(self.output_dir )
def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
train_dataset = RegressionDataset(length=train_len )
eval_dataset = RegressionDataset(length=eval_len )
config = RegressionModelConfig(a=a , b=b )
model = RegressionPreTrainedModel(config )
args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
return Trainer(
model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
def check_callbacks_equality( self , cbs1 , cbs2 ):
self.assertEqual(len(cbs1 ) , len(cbs2 ) )
# Order doesn't matter
cbs1 = sorted(cbs1 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
cbs2 = sorted(cbs2 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
for cb1, cb2 in zip(cbs1 , cbs2 ):
if isinstance(cb1 , type ) and isinstance(cb2 , type ):
self.assertEqual(cb1 , cb2 )
elif isinstance(cb1 , type ) and not isinstance(cb2 , type ):
self.assertEqual(cb1 , cb2.__class__ )
elif not isinstance(cb1 , type ) and isinstance(cb2 , type ):
self.assertEqual(cb1.__class__ , cb2 )
else:
self.assertEqual(cb1 , cb2 )
def get_expected_events( self , trainer ):
expected_events = ["""on_init_end""", """on_train_begin"""]
step = 0
train_dl_len = len(trainer.get_eval_dataloader() )
evaluation_events = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(train_dl_len ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def test_init_callback( self ):
trainer = self.get_trainer()
expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
# Callbacks passed at init are added to the default callbacks
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(MyTestTrainerCallback )
self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
trainer = self.get_trainer(disable_tqdm=True )
expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
def test_add_remove_callback( self ):
expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
trainer = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(DefaultFlowCallback )
expected_callbacks.remove(DefaultFlowCallback )
self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
trainer = self.get_trainer()
cb = trainer.pop_callback(DefaultFlowCallback )
self.assertEqual(cb.__class__ , DefaultFlowCallback )
self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
trainer.add_callback(DefaultFlowCallback )
expected_callbacks.insert(0 , DefaultFlowCallback )
self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
# We can also add, pop, or remove by instance
trainer = self.get_trainer()
cb = trainer.callback_handler.callbacks[0]
trainer.remove_callback(cb )
expected_callbacks.remove(cb )
self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
trainer = self.get_trainer()
cb1 = trainer.callback_handler.callbacks[0]
cb2 = trainer.pop_callback(cb1 )
self.assertEqual(cb1 , cb2 )
self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
trainer.add_callback(cb1 )
expected_callbacks.insert(0 , cb1 )
self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
def test_event_flow( self ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=UserWarning )
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events , self.get_expected_events(trainer ) )
# Independent log/save/eval
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events , self.get_expected_events(trainer ) )
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events , self.get_expected_events(trainer ) )
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events , self.get_expected_events(trainer ) )
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events , self.get_expected_events(trainer ) )
# A bit of everything
trainer = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events , self.get_expected_events(trainer ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
trainer = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
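# A hedged usage sketch of the callback API the tests above exercise: a callback that
# stops training early, attached through the same add_callback/remove_callback hooks.
# (EarlyStopAfterOneStep is a made-up example class, not part of transformers.)
from transformers import TrainerCallback

class EarlyStopAfterOneStep(TrainerCallback):
    def on_step_end(self, args, state, control, **kwargs):
        if state.global_step >= 1:
            control.should_training_stop = True  # TrainerControl flag the Trainer honors
        return control

# trainer = Trainer(model, training_args, train_dataset=ds, callbacks=[EarlyStopAfterOneStep()])
# trainer.add_callback / trainer.pop_callback / trainer.remove_callback accept classes or instances.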
| 89 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_note_seq_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
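# The guards above follow one pattern: probe for optional dependencies and fall back to
# dummy placeholders when they are missing. A generic sketch (has_soundlib and
# SoundProcessor are invented names used purely for illustration):
import importlib.util

def has_soundlib():
    return importlib.util.find_spec('soundlib') is not None

if has_soundlib():
    from soundlib import SoundProcessor  # real implementation
else:
    class SoundProcessor:                # dummy that fails loudly only when used
        def __init__(self, *args, **kwargs):
            raise ImportError('SoundProcessor requires the `soundlib` package.')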
| 89 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput( BaseOutput ):
predicted_image_embedding : torch.FloatTensor
class PriorTransformer( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self ,num_attention_heads : int = 3_2 ,num_layers_removed_placeholder : int = 0 ,attention_head_dim : int = 6_4 ,num_layers : int = 2_0 ,embedding_dim : int = 7_6_8 ,num_embeddings=7_7 ,additional_embeddings=4 ,dropout : float = 0.0 ,time_embed_act_fn : str = "silu" ,norm_in_type : Optional[str] = None ,embedding_proj_norm_type : Optional[str] = None ,encoder_hid_proj_type : Optional[str] = "linear" ,added_emb_type : Optional[str] = "prd" ,time_embed_dim : Optional[int] = None ,embedding_proj_dim : Optional[int] = None ,clip_embed_dim : Optional[int] = None ,):
super().__init__()
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
inner_dim = num_attention_heads * attention_head_dim
self.additional_embeddings = additional_embeddings
time_embed_dim = time_embed_dim or inner_dim
embedding_proj_dim = embedding_proj_dim or embedding_dim
clip_embed_dim = clip_embed_dim or embedding_dim
self.time_proj = Timesteps(inner_dim ,True ,0)
self.time_embedding = TimestepEmbedding(inner_dim ,time_embed_dim ,out_dim=inner_dim ,act_fn=time_embed_act_fn)
self.proj_in = nn.Linear(embedding_dim ,inner_dim)
if embedding_proj_norm_type is None:
self.embedding_proj_norm = None
elif embedding_proj_norm_type == "layer":
self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
self.embedding_proj = nn.Linear(embedding_proj_dim ,inner_dim)
if encoder_hid_proj_type is None:
self.encoder_hidden_states_proj = None
elif encoder_hid_proj_type == "linear":
self.encoder_hidden_states_proj = nn.Linear(embedding_dim ,inner_dim)
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
self.positional_embedding = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,inner_dim))
if added_emb_type == "prd":
self.prd_embedding = nn.Parameter(torch.zeros(1 ,1 ,inner_dim))
elif added_emb_type is None:
self.prd_embedding = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")
self.transformer_blocks = nn.ModuleList(
[
BasicTransformerBlock(
inner_dim ,num_attention_heads ,attention_head_dim ,dropout=dropout ,activation_fn='gelu' ,attention_bias=True ,)
for d in range(num_layers)
])
if norm_in_type == "layer":
self.norm_in = nn.LayerNorm(inner_dim)
elif norm_in_type is None:
self.norm_in = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}.")
self.norm_out = nn.LayerNorm(inner_dim)
self.proj_to_clip_embeddings = nn.Linear(inner_dim ,clip_embed_dim)
causal_attention_mask = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-10000.0)
causal_attention_mask.triu_(1)
causal_attention_mask = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,causal_attention_mask ,persistent=False)
self.clip_mean = nn.Parameter(torch.zeros(1 ,clip_embed_dim))
self.clip_std = nn.Parameter(torch.zeros(1 ,clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def attn_processors( self ) -> Dict[str, AttentionProcessor]:
processors = {}
def fn_recursive_add_processors(name : str ,module : torch.nn.Module ,processors : Dict[str, AttentionProcessor]):
if hasattr(module ,'set_processor'):
processors[F"{name}.processor"] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" ,child ,processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name ,module ,processors)
return processors
def set_attn_processor( self ,processor : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
count = len(self.attn_processors.keys())
if isinstance(processor ,dict) and len(processor) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes.")
def fn_recursive_attn_processor(name : str ,module : torch.nn.Module ,processor):
if hasattr(module ,'set_processor'):
if not isinstance(processor ,dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(F"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" ,child ,processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name ,module ,processor)
def set_default_attn_processor( self ):
self.set_attn_processor(AttnProcessor())
def forward( self ,hidden_states ,timestep : Union[torch.Tensor, float, int] ,proj_embedding : torch.FloatTensor ,encoder_hidden_states : Optional[torch.FloatTensor] = None ,attention_mask : Optional[torch.BoolTensor] = None ,return_dict : bool = True ,):
batch_size = hidden_states.shape[0]
timesteps = timestep
if not torch.is_tensor(timesteps):
timesteps = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device)
elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
timesteps = timesteps[None].to(hidden_states.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps * torch.ones(batch_size ,dtype=timesteps.dtype ,device=timesteps.device)
timesteps_projected = self.time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
timesteps_projected = timesteps_projected.to(dtype=self.dtype)
time_embeddings = self.time_embedding(timesteps_projected)
if self.embedding_proj_norm is not None:
proj_embedding = self.embedding_proj_norm(proj_embedding)
proj_embeddings = self.embedding_proj(proj_embedding)
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set')
hidden_states = self.proj_in(hidden_states)
positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
additional_embeds = []
additional_embeddings_len = 0
if encoder_hidden_states is not None:
additional_embeds.append(encoder_hidden_states)
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape) == 2:
proj_embeddings = proj_embeddings[:, None, :]
if len(hidden_states.shape) == 2:
hidden_states = hidden_states[:, None, :]
additional_embeds = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size ,-1 ,-1)
additional_embeds.append(prd_embedding)
hidden_states = torch.cat(
additional_embeds ,dim=1 ,)
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
positional_embeddings = F.pad(
positional_embeddings ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
hidden_states = hidden_states + positional_embeddings
if attention_mask is not None:
attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
attention_mask = F.pad(attention_mask ,(0, self.additional_embeddings) ,value=0.0)
attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0)
if self.norm_in is not None:
hidden_states = self.norm_in(hidden_states)
for block in self.transformer_blocks:
hidden_states = block(hidden_states ,attention_mask=attention_mask)
hidden_states = self.norm_out(hidden_states)
if self.prd_embedding is not None:
hidden_states = hidden_states[:, -1]
else:
hidden_states = hidden_states[:, additional_embeddings_len:]
predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
def post_process_latents( self ,prior_latents):
prior_latents = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
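# A standalone sketch of the additive causal mask built in __init__ above: an upper-
# triangular block of large negative values that, once added to the attention logits,
# drives attention weights to future tokens toward zero after the softmax.
import torch

seq_len = 4
mask = torch.full([seq_len, seq_len], -10000.0)
mask.triu_(1)              # keep the strict upper triangle, zero everywhere else
mask = mask[None, ...]     # add a broadcastable batch dimension
# row i can attend to columns <= i; softmax over (logits + mask) sends future weights to ~0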
| 73 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
new_matrix = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(matrix_a ) )
]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(matrix_a ) )
]
def split_matrix(a: list) -> tuple[list, list, list, list]:
if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
matrix_length = len(a )
mid = matrix_length // 2
top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
bot_right = [
[a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
]
top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
return len(matrix ), len(matrix[0] )
def print_matrix(matrix: list) -> None:
print('\n'.join(str(line ) for line in matrix ) )
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
if matrix_dimensions(matrix_a ) == (2, 2):
return default_matrix_multiplication(matrix_a , matrix_b )
a, b, c, d = split_matrix(matrix_a )
e, f, g, h = split_matrix(matrix_b )
t1 = actual_strassen(a , matrix_subtraction(f , h ) )
t2 = actual_strassen(matrix_addition(a , b ) , h )
t3 = actual_strassen(matrix_addition(c , d ) , e )
t4 = actual_strassen(d , matrix_subtraction(g , e ) )
t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
top_right = matrix_addition(t1 , t2 )
bot_left = matrix_addition(t3 , t4 )
bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
# construct the new matrix from our 4 quadrants
new_matrix = []
for i in range(len(top_right ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(bot_right ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
if matrix_dimensions(matrix1 )[1] != matrix_dimensions(matrix2 )[0]:
msg = (
'Unable to multiply these matrices, please check the dimensions.\n'
F"Matrix A: {matrix1}\n"
F"Matrix B: {matrix2}"
)
raise Exception(msg )
dimension1 = matrix_dimensions(matrix1 )
dimension2 = matrix_dimensions(matrix2 )
if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
return [matrix1, matrix2]
maximum = max(*dimension1 , *dimension2 )
maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
new_matrix1 = matrix1
new_matrix2 = matrix2
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , maxim ):
if i < dimension1[0]:
for _ in range(dimension1[1] , maxim ):
new_matrix1[i].append(0 )
else:
new_matrix1.append([0] * maxim )
if i < dimension2[0]:
for _ in range(dimension2[1] , maxim ):
new_matrix2[i].append(0 )
else:
new_matrix2.append([0] * maxim )
final_matrix = actual_strassen(new_matrix1 , new_matrix2 )
# Removing the additional zeros
for i in range(0 , maxim ):
if i < dimension1[0]:
for _ in range(dimension2[1] , maxim ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrix1, matrix2))
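# A quick cross-check sketch: compare strassen() against a naive O(n^3) multiply. Uses
# the functions defined in this file; note that strassen() pads its input lists in place,
# so compare on fresh copies (or run naive_multiply first, as in the commented line).
def naive_multiply(x: list, y: list) -> list:
    return [
        [sum(x[i][k] * y[k][j] for k in range(len(y))) for j in range(len(y[0]))]
        for i in range(len(x))
    ]

# print(naive_multiply(matrix1, matrix2) == strassen(matrix1, matrix2))  # expect True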
| 73 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class WavaVecaFeatureExtractionTester( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2_000 , feature_size=1 , padding_value=0.0 , sampling_rate=16_000 , return_attention_mask=True , do_normalize=True , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.feature_size = feature_size
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
def prepare_feat_extract_dict( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
'''simple docstring'''
def _flatten(list_of_lists ):
return list(itertools.chain(*list_of_lists ) )
if equal_length:
speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
speech_inputs = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
class WavaVecaFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
feature_extraction_class = WavaVecaFeatureExtractor
def setUp( self ):
'''simple docstring'''
self.feat_extract_tester = WavaVecaFeatureExtractionTester(self )
def _check_zero_mean_unit_variance( self , input_vector ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def test_call( self ):
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test not batched input
encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
# Test batched
encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='np' ).input_values
encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='np' ).input_values
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
np_speech_inputs = np.asarray(speech_inputs )
encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='np' ).input_values
encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='np' ).input_values
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
def test_zero_mean_unit_variance_normalization_np( self ):
'''simple docstring'''
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
paddings = ['longest', 'max_length', 'do_not_pad']
max_lengths = [None, 1_600, None]
for max_length, padding in zip(max_lengths , paddings ):
processed = feat_extract(speech_inputs , padding=padding , max_length=max_length , return_tensors='np' )
input_values = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def test_zero_mean_unit_variance_normalization( self ):
'''simple docstring'''
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lengths = range(800 , 1_400 , 200 )
speech_inputs = [floats_list((1, x) )[0] for x in lengths]
paddings = ['longest', 'max_length', 'do_not_pad']
max_lengths = [None, 1_600, None]
for max_length, padding in zip(max_lengths , paddings ):
processed = feat_extract(speech_inputs , max_length=max_length , padding=padding )
input_values = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def test_zero_mean_unit_variance_normalization_trunc_np_max_length( self ):
'''simple docstring'''
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
processed = feat_extract(
speech_inputs , truncation=True , max_length=1_000 , padding='max_length' , return_tensors='np' )
input_values = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ):
'''simple docstring'''
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
processed = feat_extract(
speech_inputs , truncation=True , max_length=1_000 , padding='longest' , return_tensors='np' )
input_values = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
processed = feat_extract(
speech_inputs , truncation=True , max_length=2_000 , padding='longest' , return_tensors='np' )
input_values = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
@require_torch
def test_double_precision_pad( self ):
'''simple docstring'''
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.float32 )
pt_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
@slow
@require_torch
def test_pretrained_checkpoints_are_set_correctly( self ):
'''simple docstring'''
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
config = WavaVecaConfig.from_pretrained(model_id )
feat_extract = WavaVecaFeatureExtractor.from_pretrained(model_id )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer' )
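# A sketch of the per-utterance normalization these tests assert on: subtract the mean
# and divide by the standard deviation so each (non-padded) input comes out
# zero-mean/unit-variance, matching _check_zero_mean_unit_variance above.
import numpy as np

def zero_mean_unit_var(x, eps=1e-7):
    return (x - x.mean()) / np.sqrt(x.var() + eps)

samples = np.random.rand(800).astype(np.float32)
normed = zero_mean_unit_var(samples)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3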
| 288 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''vinai/bartpho-syllable''': 10_24}
class BartphoTokenizer( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.vocab_file = vocab_file
self.monolingual_vocab_file = monolingual_vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(vocab_file ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
self.fairseq_tokens_to_ids = {}
cnt = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(token ) not in self.fairseq_tokens_to_ids:
self.fairseq_tokens_to_ids[str(token )] = cnt
cnt += 1
with open(monolingual_vocab_file , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
token = line.strip().split()[0]
self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
if str(unk_token ) not in self.fairseq_tokens_to_ids:
self.fairseq_tokens_to_ids[str(unk_token )] = len(self.fairseq_tokens_to_ids )
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: Tuple = self.__dict__.copy()
lowercase__: Tuple = None
lowercase__: Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase__: Union[str, Any] = {}
lowercase__: List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__: Optional[int] = [self.cls_token_id]
lowercase__: Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
lowercase__: Dict = [self.sep_token_id]
lowercase__: Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Optional[Any] = ''.join(lowerCAmelCase__ ).replace('▁' , ' ' ).strip()  # strip the sentencepiece underline marker; the original line replaced the token list with itself
return out_string
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__: int = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowercase__: List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , 'wb' ) as fi:
lowercase__: Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCAmelCase__ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'{str(lowerCAmelCase__ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
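# Hedged usage sketch for the tokenizer above (the obfuscated class mirrors
# Hugging Face's BartphoTokenizer); the checkpoint name comes from the
# PRETRAINED_VOCAB_FILES_MAP defined earlier in this file.
from transformers import BartphoTokenizer
tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
encoded = tokenizer("Chúng tôi là những nghiên cứu viên.")
print(encoded["input_ids"])                    # ids wrapped in <s> ... </s>
print(tokenizer.decode(encoded["input_ids"]))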
| 288 | 1 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_A = logging.get_logger(__name__)
class _lowerCAmelCase ( __a ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
"The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ImageGPTImageProcessor instead." , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
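# Sketch of the migration the deprecation warning above asks for: the new
# image processor is a drop-in replacement for the old feature extractor.
# The checkpoint name is an illustrative choice, not taken from this file.
from transformers import ImageGPTImageProcessor
processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")  # preferred path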
| 231 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __a , unittest.TestCase ):
_lowercase =None
_lowercase =BloomTokenizerFast
_lowercase =BloomTokenizerFast
_lowercase =True
_lowercase =False
_lowercase ='''tokenizer_file'''
_lowercase ={'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def __a ( self ) -> Dict:
super().setUp()
lowerCAmelCase_ = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self , **_UpperCamelCase ) -> Tuple:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
lowerCAmelCase_ = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
lowerCAmelCase_ = tokenizer.batch_encode_plus(_UpperCamelCase )["input_ids"]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = tokenizer.batch_decode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self , _UpperCamelCase=6 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowerCAmelCase_ = "This is a simple input"
lowerCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ = ("This is a simple input", "This is a pair")
lowerCAmelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.batch_encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.encode(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.batch_encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
lowerCAmelCase_ = None # Hotfixing padding = None
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = load_dataset("xnli" , "all_languages" , split="test" , streaming=_UpperCamelCase )
lowerCAmelCase_ = next(iter(_UpperCamelCase ) )["premise"] # pick up one data
lowerCAmelCase_ = list(sample_data.values() )
lowerCAmelCase_ = list(map(tokenizer.encode , _UpperCamelCase ) )
lowerCAmelCase_ = [tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) for x in output_tokens]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self ) -> List[Any]:
# The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not impose
# any sequence length constraints. The parent class's test would fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
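# Standalone sketch of the encode/decode round-trip the first test above
# checks, using the same checkpoint the test loads:
from transformers import BloomTokenizerFast
tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tok.encode("The quick brown fox</s>")
print(tok.decode(ids))  # 'The quick brown fox</s>' round-trips exactly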
| 231 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'time_series_transformer'
__UpperCAmelCase : Tuple = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , _a = None , _a = None , _a = "student_t" , _a = "nll" , _a = 1 , _a = [1, 2, 3, 4, 5, 6, 7] , _a = "mean" , _a = 0 , _a = 0 , _a = 0 , _a = 0 , _a = None , _a = None , _a = 32 , _a = 32 , _a = 2 , _a = 2 , _a = 2 , _a = 2 , _a = True , _a = "gelu" , _a = 64 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 100 , _a = 0.02 , _a=True , **_a , ):
# time series specific configuration
__a = prediction_length
__a = context_length or prediction_length
__a = distribution_output
__a = loss
__a = input_size
__a = num_time_features
__a = lags_sequence
__a = scaling
__a = num_dynamic_real_features
__a = num_static_real_features
__a = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__a = cardinality
else:
__a = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__a = embedding_dimension
else:
__a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__a = num_parallel_samples
# Transformer architecture configuration
__a = input_size * len(_a ) + self._number_of_features
__a = d_model
__a = encoder_attention_heads
__a = decoder_attention_heads
__a = encoder_ffn_dim
__a = decoder_ffn_dim
__a = encoder_layers
__a = decoder_layers
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = activation_function
__a = init_std
__a = use_cache
super().__init__(is_encoder_decoder=_a , **_a )
@property
def __UpperCAmelCase ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
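# Hedged example of instantiating the config defined above; the two lengths
# are illustrative values, not defaults taken from this file.
from transformers import TimeSeriesTransformerConfig
config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
print(config.prediction_length, config.context_length, config.lags_sequence)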
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
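# A toy, self-contained sketch of the _LazyModule idea used above: attribute
# access triggers the real import, keeping `import package` cheap. This is a
# simplified illustration, not transformers' actual implementation.
import importlib
class ToyLazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }
    def __getattr__(self, name):
        # e.g. ToyLazyModule("pkg", {"configuration_blip_2": ["Blip2Config"]}).Blip2Config
        mod = importlib.import_module("." + self._symbol_to_module[name], self._package)
        return getattr(mod, name)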
| 11 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = BloomTokenizerFast
lowerCAmelCase__ : Tuple = BloomTokenizerFast
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : Union[str, Any] = """tokenizer_file"""
lowerCAmelCase__ : Dict = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def UpperCamelCase__ (self : int ):
'''simple docstring'''
super().setUp()
lowercase__ = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ (self : str , **UpperCamelCase : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = self.get_rust_tokenizer()
lowercase__ = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase__ = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowercase__ = tokenizer.batch_encode_plus(UpperCamelCase )['''input_ids''']
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.batch_decode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[Any]=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase__ = '''This is a simple input'''
lowercase__ = ['''This is a simple input 1''', '''This is a simple input 2''']
lowercase__ = ('''This is a simple input''', '''This is a pair''')
lowercase__ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.encode_plus(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.batch_encode_plus(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.encode(UpperCamelCase , max_length=UpperCamelCase )
tokenizer_r.batch_encode_plus(UpperCamelCase , max_length=UpperCamelCase )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowercase__ = None # Hotfixing padding = None
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , )
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = self.get_rust_tokenizer()
lowercase__ = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCamelCase )
lowercase__ = next(iter(UpperCamelCase ) )['''premise'''] # pick up one data
lowercase__ = list(sample_data.values() )
lowercase__ = list(map(tokenizer.encode , UpperCamelCase ) )
lowercase__ = [tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase ) for x in output_tokens]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
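# Complementary sketch to the padding test above: BLOOM's fast tokenizer
# ships a pad token, so padded encoding succeeds; the test asserts that the
# same calls raise once `pad_token` is hot-fixed to None.
from transformers import BloomTokenizerFast
tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
batch = tok(["short", "a somewhat longer input"], padding="max_length", max_length=8)
print(batch["attention_mask"])  # zeros mark the padded positions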
| 2 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
lowerCamelCase : Any = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
lowerCamelCase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
lowerCamelCase : List[Any] = train_datagen.flow_from_directory(
'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
lowerCamelCase : List[str] = test_datagen.flow_from_directory(
'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
lowerCamelCase : List[str] = tf.keras.preprocessing.image.load_img(
'dataset/single_prediction/image.png', target_size=(64, 64)
)
lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image)
lowerCamelCase : str = np.expand_dims(test_image, axis=0)
lowerCamelCase : List[str] = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
lowerCamelCase : Any = 'Normal'
if result[0][0] == 1:
lowerCamelCase : Any = 'Abnormality detected'
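# TF 2.x note: Model.fit accepts generators directly and fit_generator is
# deprecated. An equivalent modern call for the training loop above would be
# (sketch, same objects as defined earlier in this script):
# classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)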
| 2 | 1 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a (unittest.TestCase ):
"""simple docstring"""
@property
def __snake_case ( self : Dict ) -> Dict:
torch.manual_seed(0 )
__snake_case : Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __snake_case ( self : List[Any] ) -> Tuple:
__snake_case : int = self.dummy_uncond_unet
__snake_case : Any = KarrasVeScheduler()
__snake_case : Tuple = KarrasVePipeline(unet=lowerCamelCase , scheduler=lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : Optional[int] = torch.manual_seed(0 )
__snake_case : Optional[Any] = pipe(num_inference_steps=2 , generator=lowerCamelCase , output_type="numpy" ).images
__snake_case : Tuple = torch.manual_seed(0 )
__snake_case : Union[str, Any] = pipe(num_inference_steps=2 , generator=lowerCamelCase , output_type="numpy" , return_dict=lowerCamelCase )[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
__snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__snake_case : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Any ) -> List[str]:
__snake_case : int = "google/ncsnpp-celebahq-256"
__snake_case : Optional[Any] = UNetaDModel.from_pretrained(lowerCamelCase )
__snake_case : Dict = KarrasVeScheduler()
__snake_case : Any = KarrasVePipeline(unet=lowerCamelCase , scheduler=lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : Union[str, Any] = torch.manual_seed(0 )
__snake_case : Optional[int] = pipe(num_inference_steps=20 , generator=lowerCamelCase , output_type="numpy" ).images
__snake_case : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__snake_case : Optional[int] = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
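# Hedged standalone sketch of the slow test above (large download; images
# are PIL objects by default at the diffusers version this test targets):
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]
image.save("karras_ve_sample.png")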
| 134 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Union[str, Any] = logging.get_logger(__name__)
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Dict = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
__snake_case : List[Any] = MaskFormerConfig(backbone_config=__lowerCamelCase )
__snake_case : List[Any] = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
__snake_case : Any = 8_4_7
__snake_case : List[Any] = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
__snake_case : Optional[int] = 1_5_0
__snake_case : int = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
__snake_case : Optional[Any] = 1_7_1
__snake_case : List[str] = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
__snake_case : Optional[int] = 1_3_3
__snake_case : int = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
__snake_case : Union[str, Any] = 1_9
__snake_case : Dict = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
__snake_case : Any = 6_5
__snake_case : Any = "mapillary-vistas-id2label.json"
__snake_case : str = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) , "r" ) )
__snake_case : Tuple = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Dict = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : Dict = dct.pop(__lowerCamelCase )
__snake_case : Any = val
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__snake_case : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__snake_case : Tuple = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
__snake_case : Tuple = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Tuple = in_proj_weight[:dim, :]
__snake_case : Tuple = in_proj_bias[: dim]
__snake_case : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
__snake_case : Tuple = in_proj_bias[
dim : dim * 2
]
__snake_case : str = in_proj_weight[
-dim :, :
]
__snake_case : Any = in_proj_bias[-dim :]
# fmt: on
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
# fmt: off
__snake_case : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__snake_case : List[str] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
__snake_case : Union[str, Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Any = in_proj_weight[: hidden_size, :]
__snake_case : Optional[int] = in_proj_bias[:config.hidden_size]
__snake_case : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
__snake_case : Any = in_proj_bias[hidden_size : hidden_size * 2]
__snake_case : Tuple = in_proj_weight[-hidden_size :, :]
__snake_case : Optional[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__snake_case : Optional[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
__snake_case : Union[str, Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : int = in_proj_weight[: hidden_size, :]
__snake_case : Tuple = in_proj_bias[:config.hidden_size]
__snake_case : str = in_proj_weight[hidden_size : hidden_size * 2, :]
__snake_case : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
__snake_case : Optional[Any] = in_proj_weight[-hidden_size :, :]
__snake_case : Tuple = in_proj_bias[-hidden_size :]
# fmt: on
def lowerCAmelCase_ ( ):
__snake_case : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__snake_case : List[str] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False ):
__snake_case : Optional[int] = get_maskformer_config(__lowerCamelCase )
# load original state_dict
with open(__lowerCamelCase , "rb" ) as f:
__snake_case : int = pickle.load(__lowerCamelCase )
__snake_case : Optional[int] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__snake_case : Tuple = create_rename_keys(__lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
read_in_swin_q_k_v(__lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(__lowerCamelCase , __lowerCamelCase )
# update to torch tensors
for key, value in state_dict.items():
__snake_case : int = torch.from_numpy(__lowerCamelCase )
# load 🤗 model
__snake_case : List[str] = MaskFormerForInstanceSegmentation(__lowerCamelCase )
model.eval()
for name, param in model.named_parameters():
print(name , param.shape )
__snake_case , __snake_case : List[str] = model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
__snake_case : Union[str, Any] = prepare_img()
if "vistas" in model_name:
__snake_case : Optional[int] = 6_5
elif "cityscapes" in model_name:
__snake_case : Optional[int] = 6_5_5_3_5
else:
__snake_case : Union[str, Any] = 2_5_5
__snake_case : Union[str, Any] = True if "ade" in model_name else False
__snake_case : str = MaskFormerImageProcessor(ignore_index=__lowerCamelCase , reduce_labels=__lowerCamelCase )
__snake_case : List[str] = image_processor(__lowerCamelCase , return_tensors="pt" )
__snake_case : Tuple = model(**__lowerCamelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__snake_case : Optional[Any] = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
image_processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case : List[str] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
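# Example invocation of the conversion script above; the script filename and
# paths are placeholders, not taken from this file:
# python convert_maskformer_checkpoint.py \
#     --model_name maskformer-swin-tiny-ade \
#     --checkpoint_path /path/to/model.pkl \
#     --pytorch_dump_folder_path ./maskformer-swin-tiny-ade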
| 134 | 1 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'vocab.txt'}
__a = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
__a = {
'openbmb/cpm-ant-10b': 1_024,
}
def __UpperCAmelCase ( a_: Optional[Any] ):
_UpperCAmelCase : Union[str, Any] = collections.OrderedDict()
with open(a_, "r", encoding="utf-8" ) as reader:
_UpperCAmelCase : List[str] = reader.readlines()
for index, token in enumerate(a_ ):
_UpperCAmelCase : List[str] = token.rstrip("\n" )
_UpperCAmelCase : Optional[int] = index
return vocab
class A__ ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : List[str]=2_0_0 ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = vocab
_UpperCAmelCase : Dict = unk_token
_UpperCAmelCase : Tuple = max_input_chars_per_word
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Optional[int] ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = list(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > self.max_input_chars_per_word:
return [self.unk_token]
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : List[str] = []
while start < len(lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = len(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = None
while start < end:
_UpperCAmelCase : Dict = "".join(chars[start:end] )
if substr in self.vocab:
_UpperCAmelCase : Any = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowerCAmelCase__ )
_UpperCAmelCase : Dict = end
return sub_tokens
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : int = VOCAB_FILES_NAMES
UpperCamelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ : Tuple = False
def __init__( self : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any]="<d>" , lowerCAmelCase__ : str="</d>" , lowerCAmelCase__ : Any="<s>" , lowerCAmelCase__ : str="</s>" , lowerCAmelCase__ : Optional[int]="<pad>" , lowerCAmelCase__ : Any="<unk>" , lowerCAmelCase__ : Any="</n>" , lowerCAmelCase__ : Tuple="</_>" , lowerCAmelCase__ : Tuple="left" , **lowerCAmelCase__ : int , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=lowerCAmelCase__ , eod_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , line_token=lowerCAmelCase__ , space_token=lowerCAmelCase__ , padding_side=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase : Optional[Any] = bod_token
_UpperCAmelCase : str = eod_token
_UpperCAmelCase : List[Any] = load_vocab(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = self.encoder[space_token]
_UpperCAmelCase : Union[str, Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_UpperCAmelCase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase__ : x[1] ) )
_UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase : int = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def _lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
return self.encoder["\n"]
@property
def _lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return len(self.encoder )
def _lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : List[Any] ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[Any] = []
for x in jieba.cut(lowerCAmelCase__ , cut_all=lowerCAmelCase__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCAmelCase__ ) )
return output_tokens
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : Optional[Any] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[Any] = [i for i in token_ids if i >= 0]
_UpperCAmelCase : Dict = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Tuple ) -> Optional[Any]:
"""simple docstring"""
return token in self.encoder
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> str:
"""simple docstring"""
return "".join(lowerCAmelCase__ )
def _lowerCAmelCase ( self : str , lowerCAmelCase__ : Dict ) -> Dict:
"""simple docstring"""
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Dict ) -> Tuple:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase__ , self.unk_token )
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(lowerCAmelCase__ ):
_UpperCAmelCase : int = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
_UpperCAmelCase : str = (filename_prefix + "-" if filename_prefix else "") + save_directory
_UpperCAmelCase : Tuple = 0
if " " in self.encoder:
_UpperCAmelCase : int = self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
_UpperCAmelCase : Optional[int] = self.encoder["\n"]
del self.encoder["\n"]
_UpperCAmelCase : int = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase__ : x[1] ) )
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
_UpperCAmelCase : List[str] = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ ))
return [1] + ([0] * len(lowerCAmelCase__ ))
| 145 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__a = logging.get_logger(__name__)
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = ['''input_features''', '''attention_mask''']
def __init__( self : List[Any] , lowerCAmelCase__ : Union[str, Any]=8_0 , lowerCAmelCase__ : Tuple=1_6_0_0_0 , lowerCAmelCase__ : Union[str, Any]=8_0 , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : List[Any]=True , **lowerCAmelCase__ : int , ) -> int:
"""simple docstring"""
super().__init__(feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : str = num_mel_bins
_UpperCAmelCase : Optional[int] = do_ceptral_normalize
_UpperCAmelCase : List[str] = normalize_means
_UpperCAmelCase : str = normalize_vars
_UpperCAmelCase : Union[str, Any] = True
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : np.ndarray , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase : Tuple = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
_UpperCAmelCase : Optional[Any] = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 )
_UpperCAmelCase : List[Any] = ta_kaldi.fbank(lowerCAmelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _lowerCAmelCase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : float = 0.0 , ) -> np.ndarray:
"""simple docstring"""
if normalize_means:
_UpperCAmelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_UpperCAmelCase : Dict = np.subtract(lowerCAmelCase__ , lowerCAmelCase__ )
if normalize_vars:
_UpperCAmelCase : Any = x[:input_length].std(axis=0 )
_UpperCAmelCase : Optional[int] = np.divide(lowerCAmelCase__ , lowerCAmelCase__ )
if input_length < x.shape[0]:
_UpperCAmelCase : str = padding_value
# make sure array is in float32
_UpperCAmelCase : Union[str, Any] = x.astype(np.floataa )
return x
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : List[np.ndarray] , lowerCAmelCase__ : Optional[np.ndarray] = None ) -> List[np.ndarray]:
"""simple docstring"""
_UpperCAmelCase : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowerCAmelCase__ , lowerCAmelCase__ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
def __call__( self : List[Any] , lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , **lowerCAmelCase__ : Optional[Any] , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_UpperCAmelCase : Any = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_UpperCAmelCase : List[Any] = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : Any = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
_UpperCAmelCase : Dict = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : List[Any] = [raw_speech]
# extract fbank features
_UpperCAmelCase : Tuple = [self._extract_fbank_features(lowerCAmelCase__ ) for waveform in raw_speech]
# convert into correct format for padding
_UpperCAmelCase : Optional[Any] = BatchFeature({"input_features": features} )
_UpperCAmelCase : Optional[Any] = self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
# make sure list is in array format
_UpperCAmelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0] , lowerCAmelCase__ ):
_UpperCAmelCase : int = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in input_features]
_UpperCAmelCase : Optional[int] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_UpperCAmelCase : Dict = [np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
_UpperCAmelCase : List[str] = (
np.array(lowerCAmelCase__ , dtype=np.intaa )
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_UpperCAmelCase : str = self.normalize(
padded_inputs["input_features"] , attention_mask=lowerCAmelCase__ )
if return_tensors is not None:
_UpperCAmelCase : Any = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
| 145 | 1 |
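# Self-contained NumPy sketch of the utterance-level CMVN performed by the
# feature extractor defined above (before this row's separator): subtract the
# per-feature mean and divide by the per-feature std over the valid frames
# only. The 1e-10 epsilon is an added safety term, not in the original.
import numpy as np
def utterance_cmvn_sketch(x: np.ndarray, input_length: int) -> np.ndarray:
    valid = x[:input_length]
    out = x.copy()
    out[:input_length] = (valid - valid.mean(axis=0)) / (valid.std(axis=0) + 1e-10)
    return out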
'''simple docstring'''

import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend('    if not is_torch_available():')
        self.assertEqual(simple_backend, 'torch')

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend('    if not (is_torch_available() and is_transformers_available()):')
        self.assertEqual(double_backend, 'torch_and_transformers')

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            '    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
        self.assertEqual(triple_backend, 'torch_and_transformers_and_onnx')

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('torch_and_transformers', objects)
        self.assertIn('flax_and_transformers', objects)
        self.assertIn('torch_and_transformers_and_onnx', objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('UNet2DModel', objects['torch'])
        self.assertIn('FlaxUNet2DConditionModel', objects['flax'])
        self.assertIn('StableDiffusionPipeline', objects['torch_and_transformers'])
        self.assertIn('FlaxStableDiffusionPipeline', objects['flax_and_transformers'])
        self.assertIn('LMSDiscreteScheduler', objects['torch_and_scipy'])
        self.assertIn('OnnxStableDiffusionPipeline', objects['torch_and_transformers_and_onnx'])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', '\'torch\'')
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')

        dummy_function = create_dummy_object('function', '\'torch\'')
        self.assertEqual(
            dummy_function, '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n')

        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass', '\'torch\'')
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
| 365 |
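The `find_backend` behaviour asserted above can be approximated with one regular expression over the `is_xxx_available()` guards. A hedged sketch (the real utility in `utils/check_dummies.py` handles more edge cases; the helper name below is illustrative):

import re

_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")

def find_backend_sketch(line: str):
    # Return None for ordinary lines; otherwise join all guarded backends
    # with "_and_", e.g. "torch_and_transformers".
    if "if not (" not in line and "if not is_" not in line:
        return None
    backends = _re_backend.findall(line)
    return "_and_".join(backends) if backends else None

assert find_backend_sketch("    if not is_torch_available():") == "torch"
assert find_backend_sketch(
    "    if not (is_torch_available() and is_transformers_available()):"
) == "torch_and_transformers"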
'''simple docstring'''

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    # terms are written in the form a(i) = b * 10^k + c;
    # ds_b is digitsum(b), c is the low-order part itself
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
| 107 | 0 |
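What the memoized solver above accelerates is the self-describing recurrence a(n+1) = a(n) + digitsum(a(n)) with a(1) = 1. A naive reference version (fine for small n, hopeless for n = 10**15) makes the target sequence explicit:

def solution_naive(n: int = 1_000) -> int:
    # a(1) = 1, a(n + 1) = a(n) + digit_sum(a(n))
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

assert solution_naive(1) == 1
assert solution_naive(2) == 2   # 1 + 1
assert solution_naive(3) == 4   # 2 + 2
assert solution_naive(4) == 8   # 4 + 4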
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
UpperCAmelCase__ = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = SqueezeBertTokenizer
def __init__( self : List[str] , A : Dict=None , A : int=None , A : List[str]=True , A : Any="[UNK]" , A : int="[SEP]" , A : List[str]="[PAD]" , A : Union[str, Any]="[CLS]" , A : Optional[Any]="[MASK]" , A : Union[str, Any]=True , A : Union[str, Any]=None , **A : str , ) -> List[Any]:
"""simple docstring"""
super().__init__(
A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase' , A) != do_lower_case
or normalizer_state.get('strip_accents' , A) != strip_accents
or normalizer_state.get('handle_chinese_chars' , A) != tokenize_chinese_chars
):
_UpperCAmelCase = getattr(A , normalizer_state.pop('type'))
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = strip_accents
_UpperCAmelCase = tokenize_chinese_chars
_UpperCAmelCase = normalizer_class(**A)
_UpperCAmelCase = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 339 |
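For a BERT-style pair encoding like the one built above, the segment layout is [CLS] A [SEP] B [SEP]: the first segment (including both delimiters) gets type id 0 and the second gets 1. A small worked example; the vocabulary ids below are hypothetical:

cls_id, sep_id = 101, 102            # illustrative special-token ids
token_ids_0 = [7592, 2088]           # first sequence (hypothetical ids)
token_ids_1 = [2129, 2024, 2017]     # second sequence (hypothetical ids)

input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)
assert len(input_ids) == len(token_type_ids) == 8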
def validate_initial_digits(credit_card_number: str) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))


def luhn_validation(credit_card_number: str) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit

    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])

    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    '''simple docstring'''
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
| 339 | 1 |
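A worked Luhn pass for the valid test number above: 4111111111111111 doubles every second digit from the right, so seven 1s become 2s and the leading 4 becomes 8, giving a total of 8 + 7·2 + 8·1 = 30, which is divisible by 10. The same arithmetic as a compact check:

def luhn_total(cc: str) -> int:
    # Double every second digit from the right, fold two-digit results
    # back to one digit (16 -> 1 + 6 = 7), then sum everything.
    total = 0
    for pos, ch in enumerate(reversed(cc)):
        digit = int(ch)
        if pos % 2 == 1:          # every second digit from the right
            digit *= 2
            if digit > 9:
                digit -= 9        # same as summing the two digits
        total += digit
    return total

assert luhn_total("4111111111111111") == 30        # divisible by 10 -> valid
assert luhn_total("4111111111111112") % 10 != 0    # off-by-one digit fails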
"""simple docstring"""
UpperCAmelCase = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 172 |
"""simple docstring"""
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
| 172 | 1 |
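A quick check of the extended-Euclid inverse above: 3 · 4 = 12 ≡ 1 (mod 11), so the inverse of 3 mod 11 is 4, and asking for an inverse that cannot exist (gcd ≠ 1) raises:

assert find_mod_inverse(3, 11) == 4               # 3 * 4 = 12 = 11 + 1
assert (3 * find_mod_inverse(3, 11)) % 11 == 1

try:
    find_mod_inverse(4, 10)                       # gcd(4, 10) = 2, no inverse
except ValueError as err:
    print(err)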
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : str = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase : Any = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__lowerCAmelCase : List[str] = dict(zip(A_ , range(len(A_ ) ) ) )
__lowerCAmelCase : Tuple = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__lowerCAmelCase : Dict = {'''unk_token''': '''<unk>'''}
__lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A_ ) )
__lowerCAmelCase : str = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
__lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , A_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A_ , A_ )
def UpperCamelCase__ ( self , **A_ ) ->Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **A_ )
def UpperCamelCase__ ( self , **A_ ) ->Union[str, Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **A_ )
def UpperCamelCase__ ( self , **A_ ) ->str:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : str = self.get_tokenizer()
__lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
__lowerCAmelCase : str = self.get_image_processor()
__lowerCAmelCase : Tuple = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
__lowerCAmelCase : Union[str, Any] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A_ )
self.assertIsInstance(processor_fast.tokenizer , A_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A_ )
self.assertIsInstance(processor_fast.image_processor , A_ )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : List[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=A_ )
__lowerCAmelCase : Optional[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Dict = self.get_image_processor()
__lowerCAmelCase : List[Any] = self.get_tokenizer()
__lowerCAmelCase : Optional[Any] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : Tuple = self.prepare_image_inputs()
__lowerCAmelCase : Optional[Any] = image_processor(A_ , return_tensors='''np''' )
__lowerCAmelCase : Union[str, Any] = processor(images=A_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : int = self.get_image_processor()
__lowerCAmelCase : Optional[Any] = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : Tuple = '''lower newer'''
__lowerCAmelCase : List[str] = processor(text=A_ , return_tensors='''np''' )
__lowerCAmelCase : List[str] = tokenizer(A_ , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : str = self.get_image_processor()
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : List[str] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : Tuple = '''lower newer'''
__lowerCAmelCase : str = self.prepare_image_inputs()
__lowerCAmelCase : int = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = '''google/owlvit-base-patch32'''
__lowerCAmelCase : Any = OwlViTProcessor.from_pretrained(A_ )
__lowerCAmelCase : List[str] = ['''cat''', '''nasa badge''']
__lowerCAmelCase : str = processor(text=A_ )
__lowerCAmelCase : Optional[Any] = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = '''google/owlvit-base-patch32'''
__lowerCAmelCase : Optional[Any] = OwlViTProcessor.from_pretrained(A_ )
__lowerCAmelCase : Dict = [['''cat''', '''nasa badge'''], ['''person''']]
__lowerCAmelCase : List[Any] = processor(text=A_ )
__lowerCAmelCase : List[Any] = 16
__lowerCAmelCase : List[str] = len(A_ )
__lowerCAmelCase : Dict = max([len(A_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : List[str] = '''google/owlvit-base-patch32'''
__lowerCAmelCase : List[Any] = OwlViTProcessor.from_pretrained(A_ )
__lowerCAmelCase : Tuple = ['''cat''', '''nasa badge''']
__lowerCAmelCase : List[str] = processor(text=A_ )
__lowerCAmelCase : Dict = 16
__lowerCAmelCase : Optional[int] = inputs['''input_ids''']
__lowerCAmelCase : List[Any] = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.get_image_processor()
__lowerCAmelCase : str = self.get_tokenizer()
__lowerCAmelCase : Optional[Any] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : List[str] = self.prepare_image_inputs()
__lowerCAmelCase : Union[str, Any] = self.prepare_image_inputs()
__lowerCAmelCase : Optional[Any] = processor(images=A_ , query_images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : int = self.get_image_processor()
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : str = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase : Any = processor.batch_decode(A_ )
__lowerCAmelCase : Dict = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
| 275 |
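The batched-text test above relies on the processor flattening nested query lists to a (batch_size * num_max_text_queries, seq_length) grid, padding shorter lists up to the longest one. The bookkeeping reduces to:

input_texts = [["cat", "nasa badge"], ["person"]]
batch_size = len(input_texts)
num_max_text_queries = max(len(texts) for texts in input_texts)
assert (batch_size, num_max_text_queries) == (2, 2)
# -> input_ids of shape (2 * 2, seq_length) after padding "person"'s query list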
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275 | 1 |
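Since the function reimplements bitwise OR on zero-padded binary strings, its output should agree with Python's native operator; a quick cross-check:

assert binary_or(25, 32) == "0b111001"      # 0b011001 | 0b100000
assert binary_or(25, 32) == bin(25 | 32)
assert binary_or(0, 0) == "0b0"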
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a :
def __init__( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=13 , __lowerCAmelCase : List[Any]=30 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Union[str, Any]=10 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Any=2 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scope
_UpperCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 2
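        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 2 = 227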
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : Optional[Any] ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = TFDeiTModel(config=__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str ):
_UpperCAmelCase = TFDeiTForMaskedImageModeling(config=__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = TFDeiTForMaskedImageModeling(__lowerCAmelCase )
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str ):
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = TFDeiTForImageClassification(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = TFDeiTForImageClassification(__lowerCAmelCase )
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_snake_case : Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : str = False
_snake_case : Any = False
_snake_case : Union[str, Any] = False
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = TFDeiTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : Any ):
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , tf.keras.layers.Dense ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__lowerCAmelCase )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowerCAmelCase_ ( self : List[Any] ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFDeiTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self : Dict ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors="""tf""" )
# forward pass
_UpperCAmelCase = model(**__lowerCAmelCase )
# verify the logits
_UpperCAmelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) )
| 30 | """simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
else:
_UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
_UpperCAmelCase = prophet
_UpperCAmelCase = prophet_old
else:
_UpperCAmelCase = prophet.prophetnet
_UpperCAmelCase = prophet_old.model
_UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ):
_UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
_UpperCAmelCase = getattr(lowercase ,lowercase )
                    assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                    assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_UpperCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
if attribute.isdigit():
_UpperCAmelCase = model[int(lowercase )]
_UpperCAmelCase = old_model[int(lowercase )]
else:
_UpperCAmelCase = getattr(lowercase ,lowercase )
if old_attribute == "":
_UpperCAmelCase = old_model
else:
if not hasattr(lowercase ,lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_UpperCAmelCase = getattr(lowercase ,lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 30 | 1 |
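The q/k/v split performed in the conversion above relies on the old fused attention layout, where in_proj_weight stacks the three projections along dim 0. A standalone sketch of that slicing with toy shapes:

import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # fused [q; k; v] projection

q_weight = in_proj_weight[:embed_dim, :]
k_weight = in_proj_weight[embed_dim : 2 * embed_dim, :]
v_weight = in_proj_weight[2 * embed_dim :, :]

assert q_weight.shape == k_weight.shape == v_weight.shape == (embed_dim, embed_dim)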
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    '''simple docstring'''
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 40 |
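For comparison, math.modf isolates the same signed fractional part that number - int(number) produces, so the function above is modf plus an optional rounding step (float comparison hedged with a tolerance):

import math

frac, _whole = math.modf(35.345)
assert abs(frac - decimal_isolate(35.345, 0)) < 1e-12
assert decimal_isolate(35.345, 1) == 0.3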
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ (__A ):
def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = proj_size
UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ )
UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size )
UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output
UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] )
UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class UpperCamelCase_ (nn.Module ):
def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple:
super().__init__()
UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5
UpperCAmelCase_ : Optional[Any] = config.hidden_size
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ )
] )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str:
for block in self.blocks:
UpperCAmelCase_ : int = block(lowerCAmelCase_ )
return hidden_states
| 268 | 0 |
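The mapper depth above, (num_hidden_layers + 1) // 5, scales with the CLIP vision backbone; assuming a CLIP ViT-L/14 tower with 24 hidden layers, it yields 5 transformer blocks over a single pooled query token:

num_hidden_layers = 24                     # CLIP ViT-L/14 vision tower (assumed)
num_mapper_blocks = (num_hidden_layers + 1) // 5
assert num_mapper_blocks == 5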
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Dict = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : int = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 157 |
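The _LazyModule pattern used above keeps the package import cheap by deferring heavy submodule imports until an attribute is first touched. A minimal sketch of the idea via PEP 562 module-level __getattr__ (illustrative, not the actual _LazyModule implementation):

import importlib

_import_structure = {
    "configuration_wav2vec2": ["Wav2Vec2Config"],
    "modeling_wav2vec2": ["Wav2Vec2Model"],
}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name: str):
    # Resolve the owning submodule on first access, then cache the attribute
    # in this module's globals so later lookups are free.
    module_name = _name_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    value = getattr(module, name)
    globals()[name] = value
    return value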
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
@dataclass(frozen=snake_case__ )
class __a :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
@dataclass(frozen=snake_case__ )
class __a :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : Optional[int]=False , lowercase_ : bool = False , ):
UpperCamelCase__ : Tuple =hans_processors[task]()
UpperCamelCase__ : Union[str, Any] =os.path.join(
lowercase_ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCamelCase__ : int =processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] =label_list[2], label_list[1]
UpperCamelCase__ : List[Any] =label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase__ : Any =cached_features_file + '''.lock'''
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
UpperCamelCase__ : Optional[int] =torch.load(lowercase_ )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
UpperCamelCase__ : str =(
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('''Training examples: %s''' , len(lowercase_ ) )
UpperCamelCase__ : Tuple =hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('''Saving features into cached file %s''' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ):
return len(self.features )
def __getitem__( self : Optional[int] , lowercase_ : Optional[Any] ):
return self.features[i]
def _lowerCAmelCase ( self : int ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __a :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
def __init__( self : Any , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : Union[str, Any]=False , lowercase_ : bool = False , ):
UpperCamelCase__ : Any =hans_processors[task]()
UpperCamelCase__ : Tuple =processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase__ , UpperCamelCase__ : Tuple =label_list[2], label_list[1]
UpperCamelCase__ : Union[str, Any] =label_list
UpperCamelCase__ : Any =processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCamelCase__ : Union[str, Any] =hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCamelCase__ : Optional[Any] =tf.data.Dataset.from_generator(
lowercase_ , (
{
                    '''example_id''': tf.int32,
                    '''input_ids''': tf.int32,
                    '''attention_mask''': tf.int32,
                    '''token_type_ids''': tf.int32,
                },
                tf.int64,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowerCAmelCase ( self : Optional[Any] ):
return self.dataset
def __len__( self : str ):
return len(self.features )
def __getitem__( self : List[str] , lowercase_ : Dict ):
return self.features[i]
def _lowerCAmelCase ( self : Dict ):
return self.label_list
class __a ( snake_case__ ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] , lowercase_ : Union[str, Any] ):
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _lowerCAmelCase ( self : Tuple , lowercase_ : Optional[int] ):
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _lowerCAmelCase ( self : List[Any] ):
return ["contradiction", "entailment", "neutral"]
def _lowerCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str] ):
UpperCamelCase__ : Tuple =[]
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCamelCase__ : str ='''%s-%s''' % (set_type, line[0])
UpperCamelCase__ : str =line[5]
UpperCamelCase__ : Any =line[6]
UpperCamelCase__ : Optional[int] =line[7][2:] if line[7].startswith('''ex''' ) else line[7]
UpperCamelCase__ : str =line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def _lowerCAmelCase ( UpperCAmelCase : List[InputExample] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : PreTrainedTokenizer , ):
'''simple docstring'''
UpperCamelCase__ : List[str] ={label: i for i, label in enumerate(UpperCAmelCase )}
UpperCamelCase__ : int =[]
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
UpperCamelCase__ : str =tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , truncation=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , )
UpperCamelCase__ : str =label_map[example.label] if example.label in label_map else 0
UpperCamelCase__ : int =int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(F'''guid: {example}''' )
logger.info(F'''features: {features[i]}''' )
return features
_SCREAMING_SNAKE_CASE : List[str] = {
"""hans""": 3,
}
_SCREAMING_SNAKE_CASE : Tuple = {
"""hans""": HansProcessor,
}
| 157 | 1 |