# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


enable_full_determinism()


class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)


class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineLatentTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass


@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2


import os
import pickle
import unittest

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi

from ...test_tokenization_common import TokenizerTesterMixin


@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )


"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
snake_case_ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
snake_case_ = """main"""
# Default branch name
snake_case_ = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
snake_case_ = """aaaaaaa"""
# This commit does not exist, so we should 404.
snake_case_ = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
snake_case_ = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])


"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
snake_case_ = 3
def _lowerCAmelCase ( lowercase_ ):
print('Generating primitive root of p' )
while True:
UpperCAmelCase = random.randrange(3 , lowercase_ )
if pow(lowercase_ , 2 , lowercase_ ) == 1:
continue
if pow(lowercase_ , lowercase_ , lowercase_ ) == 1:
continue
return g
def _lowerCAmelCase ( lowercase_ ):
print('Generating prime p...' )
UpperCAmelCase = rabin_miller.generate_large_prime(lowercase_ ) # select large prime number.
UpperCAmelCase = primitive_root(lowercase_ ) # one primitive root on modulo p.
UpperCAmelCase = random.randrange(3 , lowercase_ ) # private_key -> have to be greater than 2 for safety.
UpperCAmelCase = cryptomath.find_mod_inverse(pow(lowercase_ , lowercase_ , lowercase_ ) , lowercase_ )
UpperCAmelCase = (key_size, e_a, e_a, p)
UpperCAmelCase = (key_size, d)
return public_key, private_key
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
UpperCAmelCase , UpperCAmelCase = generate_key(lowercase_ )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , 'w' ) as fo:
fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , 'w' ) as fo:
fo.write(F"""{private_key[0]},{private_key[1]}""" )
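

# Sanity-check sketch added for illustration (the helper name is hypothetical,
# not part of the original script): generate_key() builds e_2 as the modular
# inverse of e_1**d (mod p), so a freshly generated key pair must satisfy
# (e_1**d * e_2) % p == 1.
def _check_key_pair(public_key, private_key):
    _key_size, e_1, e_2, p = public_key
    _key_size, d = private_key
    # find_mod_inverse guarantees pow(e_1, d, p) * e_2 == 1 (mod p)
    assert (pow(e_1, d, p) * e_2) % p == 1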


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()


import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # The reference encoding holds three padded sequences of length 111
        # (pad token id 5, pad token type id 3); it is rebuilt with list
        # arithmetic purely for readability, the values are unchanged.
        seq_1 = [17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3]  # noqa: E501
        seq_2 = [322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3]  # noqa: E501
        seq_3 = [32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]
        expected_encoding = {
            "input_ids": [seq_1, [5] * 79 + seq_2, [5] * 97 + seq_3],
            "token_type_ids": [
                [0] * 110 + [2],
                [3] * 79 + [0] * 31 + [2],
                [3] * 97 + [0] * 13 + [2],
            ],
            "attention_mask": [
                [1] * 111,
                [0] * 79 + [1] * 32,
                [0] * 97 + [1] * 14,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )


def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # stop at "(" so an open parenthesis is never looked up in
                # `priority` (which would raise a KeyError) nor popped here
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[::-1]  # call infix_2_postfix on Infix, return reverse of Postfix
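

# Worked example (added for illustration): infix_2_prefix("a+b*(c^d-e)") first
# mirrors the input to "(e-d^c)*b+a", converts that to the postfix "edc^-b*a+"
# of the mirrored expression, and reverses the result, yielding the prefix
# form "+a*b-^cde".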
if __name__ == "__main__":
__UpperCAmelCase = input("\nEnter an Infix Equation = ") # Input an Infix equation
__UpperCAmelCase = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
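
# Note (added for illustration): with this pattern `import transformers` stays
# cheap. A statement such as `from transformers import ErnieModel` only imports
# `modeling_ernie` (and therefore torch) when _LazyModule resolves the
# attribute on first access.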


from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)


"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int:
assert isinstance(__lowercase ,__lowercase ), f"The input value of [n={number}] is not an integer"
if number == 1:
return 2
elif number < 1:
_lowerCAmelCase : Tuple = f"The input value of [n={number}] has to be > 0"
raise ValueError(__lowercase )
else:
_lowerCAmelCase : Union[str, Any] = sylvester(number - 1 )
_lowerCAmelCase : List[str] = num - 1
_lowerCAmelCase : Any = num
return lower * upper + 1
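

# First terms for reference: sylvester(1) = 2, sylvester(2) = 2 * 1 + 1 = 3,
# sylvester(3) = 3 * 2 + 1 = 7, sylvester(4) = 7 * 6 + 1 = 43, matching the
# recurrence a(n) = a(n-1)^2 - a(n-1) + 1.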
if __name__ == "__main__":
print(F"""The 8th number in Sylvester\'s sequence: {sylvester(8)}""")
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from a T5X-Flax checkpoint to a PyTorch-friendly ordered dict."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new

def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
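
# Example invocation of the converter above (the file name and paths are placeholders shown
# only for illustration; the flag names match the argparse definitions in this script):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention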
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
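
# Back-of-the-envelope check for the partition count asserted above (illustration only):
#   100 rows * 8 bytes/row = 800 bytes of data; 800 bytes / 16 bytes per shard = 50 partitions.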
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words_ids = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
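
    # A worked example of the alignment above (the numbers follow from fairseq_offset=1, not
    # from any vocab file): spm maps "," to id 3, so the tokenizer returns 3 + 1 = 4, which is
    # exactly where "," sits in the fairseq vocab; the four specials and the madeup words are
    # resolved through fairseq_tokens_to_ids before the spm model is ever consulted.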
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
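
# A minimal standalone sketch of the offline pattern these tests exercise (the environment
# variable name is real; everything else is illustrative). The flag must be set before the
# first `transformers` import, which is why the tests above spawn a fresh interpreter:
#
#   import os, subprocess, sys
#   env = {**os.environ, "TRANSFORMERS_OFFLINE": "1"}
#   subprocess.run([sys.executable, "-c", "import transformers; print('offline ok')"], env=env)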
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    """
    Constructs a SAM processor which wraps a SAM image processor and a 2D points & bounding boxes processor into a
    single processor.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                # Use the first original size for every set of points.
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        """Pads the 2D points and labels to the maximum number of points in the batch."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False) -> np.ndarray:
        """
        Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
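
    # Worked example of the rescaling above (numbers are illustrative, not from any model):
    # with original_size=(600, 800) and longest_edge=1024 the resize keeps the aspect ratio,
    # giving (768, 1024); a point (x=400, y=300) then becomes (400 * 1024/800, 300 * 768/600)
    # = (512, 384). Bounding boxes go through the same math after a (-1, 2, 2) reshape so both
    # corners are scaled, and are then flattened back to (-1, 4).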
    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        """
        Checks and preprocesses the 2D points, labels and bounding boxes: converts them to numpy arrays if they are
        not already, and raises an error if they are not in the right format.
        """
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments
def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
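
# Quick sanity checks of the formulas above (values follow directly from the arithmetic):
#   simple_interest(1000, 0.001, 30) -> 1000 * 0.001 * 30 = 30.0
#   compound_interest(1000, 0.05, 2) -> 1000 * ((1.05 ** 2) - 1) = 102.5
#   apr_interest simply compounds the daily rate (APR / 365) over 365 * years periods.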
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
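
# Usage sketch (illustrative): with RUN_SLOW=yes in the environment,
# parse_flag_from_env("RUN_SLOW", default=False) returns a truthy value (strtobool gives 1),
# so the `slow` decorator below stops skipping the marked tests.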
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow. Slow tests are skipped by default; set RUN_SLOW=1 to run them."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    """Decorator marking a test that must be only ran on the CPU."""
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    """Decorator marking a test that requires CUDA."""
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    """Decorator marking a test that requires XPU."""
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    """Decorator marking a test that requires MPS backend support in torch."""
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    """Decorator marking a test that requires transformers and datasets."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    """Decorator marking a test that requires bitsandbytes."""
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    """Decorator marking a test that requires TPUs."""
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    """Decorator marking a test that requires exactly one GPU."""
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    """Decorator marking a test that requires exactly one XPU."""
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    """Decorator marking a test that requires a multi-GPU setup."""
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    """Decorator marking a test that requires a multi-XPU setup."""
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    """Decorator marking a test that requires safetensors."""
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    """Decorator marking a test that requires DeepSpeed."""
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    """Decorator marking a test that requires torch >= 1.12.0 (for FSDP)."""
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    """Decorator marking that a test requires a particular torch version to be tested."""
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    """Decorator marking a test that requires tensorboard."""
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    """Decorator marking a test that requires wandb."""
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    """Decorator marking a test that requires comet_ml."""
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    """Decorator marking a test that requires at least one tracker and no comet_ml."""
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its
    data at the start of a test, and then destroys it at the end of the TestCase.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Creates a `tempfile.TemporaryDirectory` for the class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Removes `cls.tmpdir` after the test suite has finished."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Destroys all contents in `self.tmpdir`, but not `self.tmpdir` itself."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """A TestCase class that resets the accelerator state at the end of every test."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """A TestCase class designed to dynamically add mockers that should be used in every test."""

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        """Adds custom mocks that are started for each test and stopped on cleanup."""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Gathers `tensor` across processes and checks that every copy is equal."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda x: tee(x, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda x: tee(x, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class lowerCamelCase__( __lowerCamelCase):
pass
def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ):
'''simple docstring'''
try:
__lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(A__ , """decode""" ):
__lowerCamelCase = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
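

# A typical call site (the commands below are hypothetical, shown only for
# illustration): `execute_subprocess_async` launches a script and raises
# RuntimeError if any worker exits non-zero, while `run_command` is the simpler
# synchronous variant for short-lived commands.
#     execute_subprocess_async(["accelerate", "launch", "my_script.py"], env=os.environ.copy())
#     run_command(["git", "rev-parse", "HEAD"], return_stdout=True)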
"""
Project Euler problem 22: sum the "name scores" of all names in p022_names.txt,
where a name's score is its alphabetical value multiplied by its position in the
alphabetically sorted list.
"""
import os


def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
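

# Worked example from the problem statement: COLIN has an alphabetical value of
# 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list, so it
# contributes 938 * 53 = 49714 to the total.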
if __name__ == "__main__":
print(solution())
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
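
        # Note on `strength` in img2img: it controls how far the init image is
        # noised before denoising begins, so only roughly the last `strength`
        # fraction of the schedule runs. With strength=0.2 and
        # num_inference_steps=100 above, about 20 denoising steps execute and
        # the output stays close to the init image.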
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the file features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
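

# For reference, these are the pandas-style JSON orients that the
# `container`/`keys`/`len_at` expectations above encode: orient="records" emits a
# JSON list with one object per row, orient="split" a single object of the form
# {"columns": [...], "data": [...]}, orient="index" an object keyed by row index,
# orient="columns" an object keyed by column name, orient="values" a bare list of
# row values, and orient="table" an object with a "schema" plus "data".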
from __future__ import annotations

from collections import deque


class Automaton:
    """Aho-Corasick automaton for matching a set of keywords in a single pass over a string."""

    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """
        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
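

# Complexity note: building the trie and fail links costs O(total length of the
# keywords); each search then costs O(len(string) + number of matches reported),
# which is the advantage of Aho-Corasick over running a single-pattern matcher
# once per keyword.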
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : str ):
snake_case_ : int = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
snake_case_ : List[Any] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
snake_case_ : Any = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
snake_case_ : str = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ : List[Any] = os.path.join(self.tmpdirname , lowercase_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase_ ) + '''\n''' )
# load decoder from hub
snake_case_ : int = '''hf-internal-testing/ngram-beam-search-decoder'''
def _snake_case ( self : Any , **lowercase_ : Optional[Any] ):
snake_case_ : List[Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(lowercase_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def _snake_case ( self : str , **lowercase_ : Dict ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **lowercase_ )
def _snake_case ( self : Union[str, Any] , **lowercase_ : int ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **lowercase_ )
def _snake_case ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def _snake_case ( self : Optional[int] ):
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : Dict = self.get_feature_extractor()
snake_case_ : int = self.get_decoder()
snake_case_ : str = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
processor.save_pretrained(self.tmpdirname )
snake_case_ : int = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowercase_ )
def _snake_case ( self : int ):
snake_case_ : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
snake_case_ : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _snake_case ( self : Dict ):
snake_case_ : Any = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(lowercase_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=lowercase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _snake_case ( self : int ):
snake_case_ : int = self.get_feature_extractor()
snake_case_ : str = self.get_tokenizer()
snake_case_ : Union[str, Any] = self.get_decoder()
snake_case_ : Tuple = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
snake_case_ : str = floats_list((3, 1000) )
snake_case_ : List[str] = feature_extractor(lowercase_ , return_tensors='''np''' )
snake_case_ : int = processor(lowercase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _snake_case ( self : Optional[int] ):
snake_case_ : Tuple = self.get_feature_extractor()
snake_case_ : str = self.get_tokenizer()
snake_case_ : Union[str, Any] = self.get_decoder()
snake_case_ : int = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
snake_case_ : Union[str, Any] = '''This is a test string'''
snake_case_ : Dict = processor(text=lowercase_ )
snake_case_ : Tuple = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self : List[str] , lowercase_ : Any=(2, 10, 16) , lowercase_ : str=77 ):
np.random.seed(lowercase_ )
return np.random.rand(*lowercase_ )
def _snake_case ( self : Optional[Any] ):
snake_case_ : Optional[int] = self.get_feature_extractor()
snake_case_ : str = self.get_tokenizer()
snake_case_ : int = self.get_decoder()
snake_case_ : str = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
snake_case_ : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
snake_case_ : Optional[Any] = processor.decode(lowercase_ )
snake_case_ : Union[str, Any] = decoder.decode_beams(lowercase_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _snake_case ( self : Optional[Any] , lowercase_ : Union[str, Any] ):
snake_case_ : Any = self.get_feature_extractor()
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : List[str] = self.get_decoder()
snake_case_ : str = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
snake_case_ : Any = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
snake_case_ : str = processor.batch_decode(lowercase_ )
else:
with get_context(lowercase_ ).Pool() as pool:
snake_case_ : Any = processor.batch_decode(lowercase_ , lowercase_ )
snake_case_ : int = list(lowercase_ )
with get_context('''fork''' ).Pool() as p:
snake_case_ : Union[str, Any] = decoder.decode_beams_batch(lowercase_ , lowercase_ )
snake_case_, snake_case_, snake_case_ : Any = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowercase_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(lowercase_ , decoded_processor.logit_score )
self.assertListEqual(lowercase_ , decoded_processor.lm_score )
def _snake_case ( self : List[str] ):
snake_case_ : Any = self.get_feature_extractor()
snake_case_ : Any = self.get_tokenizer()
snake_case_ : str = self.get_decoder()
snake_case_ : Any = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
snake_case_ : Optional[Any] = self._get_dummy_logits()
snake_case_ : str = 15
snake_case_ : Tuple = -20.0
snake_case_ : Dict = -4.0
snake_case_ : Optional[Any] = processor.batch_decode(
lowercase_ , beam_width=lowercase_ , beam_prune_logp=lowercase_ , token_min_logp=lowercase_ , )
snake_case_ : Union[str, Any] = decoded_processor_out.text
snake_case_ : str = list(lowercase_ )
with get_context('''fork''' ).Pool() as pool:
snake_case_ : str = decoder.decode_beams_batch(
lowercase_ , lowercase_ , beam_width=lowercase_ , beam_prune_logp=lowercase_ , token_min_logp=lowercase_ , )
snake_case_ : List[str] = [d[0][0] for d in decoded_decoder_out]
snake_case_ : Dict = [d[0][2] for d in decoded_decoder_out]
snake_case_ : int = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , lowercase_ )
self.assertTrue(np.array_equal(lowercase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , lowercase_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(lowercase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , lowercase_ , atol=1E-3 ) )
def _snake_case ( self : List[Any] ):
snake_case_ : Optional[int] = self.get_feature_extractor()
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : Optional[Any] = self.get_decoder()
snake_case_ : Dict = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
snake_case_ : str = self._get_dummy_logits()
snake_case_ : Optional[int] = 2.0
snake_case_ : str = 5.0
snake_case_ : Tuple = -20.0
snake_case_ : Tuple = True
snake_case_ : List[Any] = processor.batch_decode(
lowercase_ , alpha=lowercase_ , beta=lowercase_ , unk_score_offset=lowercase_ , lm_score_boundary=lowercase_ , )
snake_case_ : Any = decoded_processor_out.text
snake_case_ : Union[str, Any] = list(lowercase_ )
decoder.reset_params(
alpha=lowercase_ , beta=lowercase_ , unk_score_offset=lowercase_ , lm_score_boundary=lowercase_ , )
with get_context('''fork''' ).Pool() as pool:
snake_case_ : Dict = decoder.decode_beams_batch(
lowercase_ , lowercase_ , )
snake_case_ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , lowercase_ )
snake_case_ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , lowercase_ )
def _snake_case ( self : Dict ):
snake_case_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
snake_case_ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
snake_case_ : Tuple = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
snake_case_ : List[Any] = os.listdir(lowercase_ )
snake_case_ : int = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowercase_ , lowercase_ )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : int = snapshot_download('''hf-internal-testing/processor_with_lm''' )
snake_case_ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(lowercase_ )
snake_case_ : str = processor.decoder.model_container[processor.decoder._model_key]
snake_case_ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
snake_case_ : Optional[Any] = os.listdir(lowercase_ )
snake_case_ : List[str] = os.listdir(lowercase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowercase_ , lowercase_ )
def _snake_case ( self : str ):
snake_case_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
snake_case_ : Any = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
snake_case_ : List[Any] = floats_list((3, 1000) )
snake_case_ : List[Any] = processor_wavaveca(lowercase_ , return_tensors='''np''' )
snake_case_ : str = processor_auto(lowercase_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
snake_case_ : int = self._get_dummy_logits()
snake_case_ : List[str] = processor_wavaveca.batch_decode(lowercase_ )
snake_case_ : int = processor_auto.batch_decode(lowercase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : str = self.get_feature_extractor()
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : List[Any] = self.get_decoder()
snake_case_ : Tuple = WavaVecaProcessorWithLM(tokenizer=lowercase_ , feature_extractor=lowercase_ , decoder=lowercase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def _snake_case ( lowercase_ : Tuple , lowercase_ : str ):
snake_case_ : int = [d[key] for d in offsets]
return retrieved_list
def _snake_case ( self : str ):
snake_case_ : int = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
snake_case_ : Dict = self._get_dummy_logits()[0]
snake_case_ : int = processor.decode(lowercase_ , output_word_offsets=lowercase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def _snake_case ( self : Optional[int] ):
snake_case_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
snake_case_ : Optional[Any] = self._get_dummy_logits()
snake_case_ : int = processor.batch_decode(lowercase_ , output_word_offsets=lowercase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(lowercase_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _snake_case ( self : int ):
import torch
snake_case_ : List[Any] = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=lowercase_ )
snake_case_ : Any = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) )
snake_case_ : Dict = iter(lowercase_ )
snake_case_ : List[Any] = next(lowercase_ )
snake_case_ : int = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
snake_case_ : str = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
snake_case_ : Tuple = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
snake_case_ : Optional[int] = model(lowercase_ ).logits.cpu().numpy()
snake_case_ : List[str] = processor.decode(logits[0] , output_word_offsets=lowercase_ )
snake_case_ : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
snake_case_ : Optional[int] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
snake_case_ : Union[str, Any] = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(lowercase_ , '''word''' ) ) , lowercase_ )
self.assertEqual(''' '''.join(self.get_from_offsets(lowercase_ , '''word''' ) ) , output.text )
# output times
snake_case_ : Tuple = torch.tensor(self.get_from_offsets(lowercase_ , '''start_time''' ) )
snake_case_ : Dict = torch.tensor(self.get_from_offsets(lowercase_ , '''end_time''' ) )
# fmt: off
snake_case_ : Tuple = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
snake_case_ : int = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=0.01 ) )
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=0.01 ) )
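
        # Note: `inputs_to_logits_ratio / sampling_rate` converts frame offsets
        # into seconds; wav2vec2-base downsamples raw audio by a factor of 320,
        # so at 16 kHz each CTC logit frame spans 320 / 16000 = 0.02 s.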
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowercase ( _a="" ):
snake_case_ : List[str] = tempfile.mkdtemp()
return os.path.join(_a , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : str ):
snake_case_ : int = torch.rand(12 , dtype=torch.floataa ) - 0.5
snake_case_ : Optional[int] = AgentAudio(lowercase_ )
snake_case_ : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowercase_ ) )
# Ensure that the file contains the same value as the original tensor
snake_case_, snake_case_ : int = sf.read(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , torch.tensor(lowercase_ ) , atol=1E-4 ) )
def _snake_case ( self : Optional[int] ):
snake_case_ : Any = torch.rand(12 , dtype=torch.floataa ) - 0.5
snake_case_ : List[str] = get_new_path(suffix='''.wav''' )
sf.write(lowercase_ , lowercase_ , 16000 )
snake_case_ : Tuple = AgentAudio(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , lowercase_ )
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Tuple ):
snake_case_ : List[Any] = torch.randint(0 , 256 , (64, 64, 3) )
snake_case_ : str = AgentImage(lowercase_ )
snake_case_ : Union[str, Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase_ , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
def _snake_case ( self : str ):
snake_case_ : Any = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case_ : Optional[int] = Image.open(lowercase_ )
snake_case_ : Tuple = AgentImage(lowercase_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
def _snake_case ( self : str ):
snake_case_ : int = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case_ : Dict = Image.open(lowercase_ )
snake_case_ : List[str] = AgentImage(lowercase_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Any ):
snake_case_ : Tuple = '''Hey!'''
snake_case_ : Optional[Any] = AgentText(lowercase_ )
self.assertEqual(lowercase_ , agent_type.to_string() )
self.assertEqual(lowercase_ , agent_type.to_raw() )
self.assertEqual(lowercase_ , lowercase_ )
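
        # Note: AgentText subclasses str, which is why the equality checks above
        # hold both against `to_string()`/`to_raw()` and against the object
        # itself; the audio tests instead use atol=1e-4 because AgentAudio
        # round-trips tensors through an on-disk audio file via soundfile, which
        # quantizes sample values on write.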
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
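
        # Note on the expected ids: the first four ids are reserved for special
        # tokens (with <unk> resolving to id 3), so the five vocab entries
        # written in setUp map to ids 4-8 and every out-of-vocabulary piece
        # ("▁l", "à") falls back to the <unk> id 3.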
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, volume: float, moles: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
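

# Worked examples (values computed directly from the formulas above, with
# R = 0.0821 L*atm/(mol*K), i.e. volume in litres, pressure in atmospheres and
# temperature in kelvin):
#     molarity_to_normality(2, 3.1, 0.31)             # -> 20
#     moles_to_pressure(0.82, 3, 300)                 # -> 90
#     moles_to_volume(0.82, 3, 300)                   # -> 90
#     pressure_and_volume_to_temperature(0.82, 1, 2)  # -> 5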
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Length of a circular arc subtending `angle` degrees: 2 * pi * r * (angle / 360)."""
    return 2 * pi * radius * (angle / 360)
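

# Worked example: a 90 degree arc of a circle with radius 10 is a quarter of the
# circumference, i.e. 2 * pi * 10 / 4 = 15.707963267948966, matching the call below.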
if __name__ == "__main__":
print(arc_length(90, 10))
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError

from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPT2TokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")


@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")


class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
trie.add('''Hello''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def a__( self : str )-> Any:
"""simple docstring"""
UpperCAmelCase = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def a__( self : str )-> Any:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def a__( self : List[Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def a__( self : List[str] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def a__( self : str )-> int:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def a__( self : Dict )-> str:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def a__( self : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = Trie()
UpperCAmelCase = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCAmelCase , ['''AB''', '''C'''] )
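if __name__ == "__main__":
    # Hedged usage sketch (an addition for illustration): Trie is the real class in
    # transformers.tokenization_utils that the tests above exercise; split() does
    # longest-match segmentation around the added tokens.
    from transformers.tokenization_utils import Trie
    demo_trie = Trie()
    demo_trie.add("[CLS]")
    demo_trie.add("extra_id_100")
    print(demo_trie.split("[CLS] This is a extra_id_100"))  # ['[CLS]', ' This is a ', 'extra_id_100']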
| 91
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[str] = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
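# Hedged sketch (an addition): the _LazyModule wiring above defers the heavy modeling
# import until first attribute access. A standard-library analogue of that mechanism:
if __name__ == "__main__":
    import importlib
    def lazy_getattr(module_name):
        _module = None
        def get(attr):
            nonlocal _module
            if _module is None:  # the real import happens only now
                _module = importlib.import_module(module_name)
            return getattr(_module, attr)
        return get
    dumps = lazy_getattr("json")("dumps")
    print(dumps({"lazy": True}))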
| 91
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , UpperCAmelCase=0 , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = projection_dim
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
_UpperCAmelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFDPRContextEncoder(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFDPRQuestionEncoder(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFDPRReader(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def UpperCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFDPRModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def UpperCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFDPRContextEncoder.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFDPRContextEncoder.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFDPRReader.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
_UpperCAmelCase = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
_UpperCAmelCase = model(UpperCAmelCase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_UpperCAmelCase = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 39
|
from __future__ import annotations
def get_valid_pos ( position : tuple[int, int] , n : int ):
    '''simple docstring'''
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test , x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete ( board : list[list[int]] ):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper ( board : list[list[int]] , pos : tuple[int, int] , curr : int ):
    '''simple docstring'''
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour ( n : int ):
    '''simple docstring'''
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
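# Illustrative run (an addition, relying on the function names above): a 5x5 open
# knight's tour exists from the corner, so the printed entries 1..25 trace the
# knight's path; plain backtracking may take a moment on larger boards.
if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)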
| 29
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] =logging.get_logger(__name__)
__snake_case : Union[str, Any] ={
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ ="""markuplm"""
def __init__(self ,__lowerCamelCase=3_05_22 ,__lowerCamelCase=7_68 ,__lowerCamelCase=12 ,__lowerCamelCase=12 ,__lowerCamelCase=30_72 ,__lowerCamelCase="gelu" ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.1 ,__lowerCamelCase=5_12 ,__lowerCamelCase=2 ,__lowerCamelCase=0.02 ,__lowerCamelCase=1e-12 ,__lowerCamelCase=0 ,__lowerCamelCase=0 ,__lowerCamelCase=2 ,__lowerCamelCase=2_56 ,__lowerCamelCase=10_24 ,__lowerCamelCase=2_16 ,__lowerCamelCase=10_01 ,__lowerCamelCase=32 ,__lowerCamelCase=50 ,__lowerCamelCase="absolute" ,__lowerCamelCase=True ,__lowerCamelCase=None ,**__lowerCamelCase ,) -> List[Any]:
"""simple docstring"""
super().__init__(
pad_token_id=__lowerCamelCase ,bos_token_id=__lowerCamelCase ,eos_token_id=__lowerCamelCase ,**__lowerCamelCase ,)
lowerCAmelCase__ : int = vocab_size
lowerCAmelCase__ : Optional[Any] = hidden_size
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : str = hidden_act
lowerCAmelCase__ : Tuple = intermediate_size
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Tuple = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = max_position_embeddings
lowerCAmelCase__ : Union[str, Any] = type_vocab_size
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : str = layer_norm_eps
lowerCAmelCase__ : Union[str, Any] = position_embedding_type
lowerCAmelCase__ : List[Any] = use_cache
lowerCAmelCase__ : Dict = classifier_dropout
# additional properties
lowerCAmelCase__ : int = max_depth
lowerCAmelCase__ : List[Any] = max_xpath_tag_unit_embeddings
lowerCAmelCase__ : Optional[Any] = max_xpath_subs_unit_embeddings
lowerCAmelCase__ : Tuple = tag_pad_id
lowerCAmelCase__ : Any = subs_pad_id
lowerCAmelCase__ : Dict = xpath_unit_hidden_size
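# Hedged usage sketch (an addition): MarkupLMConfig is the real transformers class
# that the renamed copy above mirrors; the xpath fields are its markup-specific extras.
if __name__ == "__main__":
    from transformers import MarkupLMConfig
    cfg = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
    print(cfg.model_type, cfg.max_depth, cfg.xpath_unit_hidden_size)  # markuplm 50 32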
| 365
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def __init__(self ,__lowerCamelCase ,__lowerCamelCase=7 ,__lowerCamelCase=3 ,__lowerCamelCase=18 ,__lowerCamelCase=30 ,__lowerCamelCase=4_00 ,__lowerCamelCase=True ,__lowerCamelCase=None ,__lowerCamelCase=True ,__lowerCamelCase=False ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=[0.5, 0.5, 0.5] ,__lowerCamelCase=[0.5, 0.5, 0.5] ,) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = parent
lowerCAmelCase__ : Optional[Any] = batch_size
lowerCAmelCase__ : str = num_channels
lowerCAmelCase__ : List[str] = image_size
lowerCAmelCase__ : List[Any] = min_resolution
lowerCAmelCase__ : Union[str, Any] = max_resolution
lowerCAmelCase__ : Union[str, Any] = do_resize
lowerCAmelCase__ : str = size if size is not None else {'''height''': 18, '''width''': 20}
lowerCAmelCase__ : List[str] = do_thumbnail
lowerCAmelCase__ : str = do_align_axis
lowerCAmelCase__ : Optional[Any] = do_pad
lowerCAmelCase__ : Tuple = do_normalize
lowerCAmelCase__ : List[Any] = image_mean
lowerCAmelCase__ : List[str] = image_std
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase__ ( lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =DonutImageProcessor if is_vision_available() else None
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = DonutImageProcessingTester(self )
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_thumbnail''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_align_long_axis''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_pad''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'''image_std''' ) )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''height''': 18, '''width''': 20} )
lowerCAmelCase__ : int = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
lowerCAmelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=(42, 84) )
self.assertEqual(image_processor.size ,{'''height''': 84, '''width''': 42} )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
@is_flaky()
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,Image.Image )
# Test not batched input
lowerCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
lowerCAmelCase__ : Optional[int] = image_processing(__lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
@is_flaky()
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCamelCase ,numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,np.ndarray )
# Test not batched input
lowerCAmelCase__ : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
lowerCAmelCase__ : int = image_processing(__lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
@is_flaky()
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCamelCase ,torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,torch.Tensor )
# Test not batched input
lowerCAmelCase__ : Any = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
lowerCAmelCase__ : Dict = image_processing(__lowerCamelCase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
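if __name__ == "__main__":
    # Hedged usage sketch (an addition): DonutImageProcessor is the real class under
    # test; the exact output shape depends on the thumbnail/pad flags, so the value
    # below is indicative rather than guaranteed.
    import numpy as np
    from PIL import Image
    from transformers import DonutImageProcessor
    processor = DonutImageProcessor(size={"height": 18, "width": 20})
    sample = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
    print(processor(sample, return_tensors="pt").pixel_values.shape)  # expected (1, 3, 18, 20)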
| 94
| 0
|
def solution ( power : int = 10_00 ):
    num = 2**power
    list_num = list(str(num ) )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
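# Cross-check (an addition): with Python's big integers the digit sum is a one-liner,
# and both forms agree by construction.
assert solution(10_00) == sum(int(digit) for digit in str(2**10_00))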
| 62
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_A = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_A = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
    __UpperCamelCase =numpy.dtype(numpy.uint32 ).newbyteorder('>' )
    return numpy.frombuffer(SCREAMING_SNAKE_CASE__.read(4 ) , dtype=__UpperCamelCase )[0]
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_51:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =bytestream.read(rows * cols * num_images )
        __UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uint8 )
__UpperCamelCase =data.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
return data
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.one_hot on tensors.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =labels_dense.shape[0]
__UpperCamelCase =numpy.arange(SCREAMING_SNAKE_CASE__ ) * num_classes
__UpperCamelCase =numpy.zeros((num_labels, num_classes) )
__UpperCamelCase =1
return labels_one_hot
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : str=10 ):
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_49:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =bytestream.read(SCREAMING_SNAKE_CASE__ )
        __UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uint8 )
if one_hot:
return _dense_to_one_hot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return labels
class UpperCAmelCase__ :
"""simple docstring"""
@deprecated(
A_ , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
    def __init__( self , A_ , A_ , A_=False , A_=False , A_=dtypes.float32 , A_=True , A_=None , ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =random_seed.get_seed(A_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__UpperCamelCase =dtypes.as_dtype(A_ ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
__UpperCamelCase =10000
__UpperCamelCase =one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
__UpperCamelCase =images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__UpperCamelCase =images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
                __UpperCamelCase =images.astype(numpy.float32 )
__UpperCamelCase =numpy.multiply(A_ , 1.0 / 255.0 )
__UpperCamelCase =images
__UpperCamelCase =labels
__UpperCamelCase =0
__UpperCamelCase =0
@property
def _a ( self ) -> Tuple:
return self._images
@property
def _a ( self ) -> Union[str, Any]:
return self._labels
@property
def _a ( self ) -> Optional[Any]:
return self._num_examples
@property
def _a ( self ) -> List[str]:
return self._epochs_completed
def _a ( self , A_ , A_=False , A_=True ) -> Optional[Any]:
if fake_data:
__UpperCamelCase =[1] * 784
__UpperCamelCase =[1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(A_ )],
[fake_label for _ in range(A_ )],
)
__UpperCamelCase =self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__UpperCamelCase =numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
__UpperCamelCase =self.images[perma]
__UpperCamelCase =self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__UpperCamelCase =self._num_examples - start
__UpperCamelCase =self._images[start : self._num_examples]
__UpperCamelCase =self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__UpperCamelCase =numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
__UpperCamelCase =self.images[perm]
__UpperCamelCase =self.labels[perm]
# Start next epoch
__UpperCamelCase =0
__UpperCamelCase =batch_size - rest_num_examples
__UpperCamelCase =self._index_in_epoch
__UpperCamelCase =self._images[start:end]
__UpperCamelCase =self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
__UpperCamelCase =self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please write your own downloading logic.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
gfile.MakeDirs(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
urllib.request.urlretrieve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # noqa: S310
with gfile.GFile(SCREAMING_SNAKE_CASE__ ) as f:
__UpperCamelCase =f.size()
print('Successfully downloaded' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'bytes.' )
return filepath
@deprecated(
SCREAMING_SNAKE_CASE__ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=dtypes.float32 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : str=50_00 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : str=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =fake()
__UpperCamelCase =fake()
__UpperCamelCase =fake()
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
if not source_url: # empty string check
__UpperCamelCase =DEFAULT_SOURCE_URL
__UpperCamelCase ='train-images-idx3-ubyte.gz'
__UpperCamelCase ='train-labels-idx1-ubyte.gz'
__UpperCamelCase ='t10k-images-idx3-ubyte.gz'
__UpperCamelCase ='t10k-labels-idx1-ubyte.gz'
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =(
'Validation size should be between 0 and '
F'{len(SCREAMING_SNAKE_CASE__ )}. Received: {validation_size}.'
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =train_images[:validation_size]
__UpperCamelCase =train_labels[:validation_size]
__UpperCamelCase =train_images[validation_size:]
__UpperCamelCase =train_labels[validation_size:]
__UpperCamelCase ={'dtype': dtype, 'reshape': reshape, 'seed': seed}
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
| 62
| 1
|
'''simple docstring'''
import numpy as np
def sigmoid ( vector ):
    return 1 / (1 + np.exp(-vector ))
def gaussian_error_linear_unit ( vector ):
    return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
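# Numeric spot-check (an addition): vector * sigmoid(1.702 * vector) is the standard
# sigmoid approximation of GELU; it is exactly 0 at 0 and close to the identity for
# large positive inputs.
if __name__ == "__main__":
    print(gaussian_error_linear_unit(np.array([-1.0, 0.0, 1.0])))  # ~[-0.154  0.  0.846]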
| 270
|
'''simple docstring'''
from random import randint, random
def construct_highway ( number_of_cells , frequency , initial_speed , random_frequency = False , random_speed = False , max_speed = 5 , ):
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance ( highway_now , car_index ):
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )
def update ( highway_now , probability , max_speed ):
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cells before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate ( highway , number_of_update , probability , max_speed ):
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
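# Illustrative run (an addition): a 20-cell circular road with a car every 4 cells at
# speed 1, simulated for 3 steps with a 30% chance of random braking per car.
if __name__ == "__main__":
    for state in simulate(construct_highway(20, 4, 1), 3, 0.3, 5):
        print(state)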
| 270
| 1
|
"""simple docstring"""
def or_gate ( input_1 , input_2 ):
    """simple docstring"""
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate ( ):
    """simple docstring"""
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 286
|
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : List[str] = CustomTokenizer
pass
| 286
| 1
|
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase_ : int="" , lowerCAmelCase_ : Optional[int]="train" ) -> Tuple:
'''simple docstring'''
assert os.path.isdir(SCREAMING_SNAKE_CASE_ )
A__ : Optional[int] =[]
A__ : str =os.listdir(SCREAMING_SNAKE_CASE_ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
A__ : List[Any] =os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not os.path.isfile(SCREAMING_SNAKE_CASE_ ):
continue
self.documents.append(SCREAMING_SNAKE_CASE_ )
def __len__( self : str ) -> List[Any]:
'''simple docstring'''
return len(self.documents )
def __getitem__( self : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Optional[int]:
'''simple docstring'''
A__ : Any =self.documents[idx]
A__ : Tuple =document_path.split("""/""" )[-1]
with open(SCREAMING_SNAKE_CASE_ , encoding="""utf-8""" ) as source:
A__ : List[str] =source.read()
A__ : str =process_story(SCREAMING_SNAKE_CASE_ )
return document_name, story_lines, summary_lines
def __lowerCamelCase ( __snake_case : int ) -> Optional[Any]:
"""simple docstring"""
A__ : Union[str, Any] =list(filter(lambda __snake_case : len(snake_case__ ) != 0, [line.strip() for line in raw_story.split("""\n""" )] ) )
# for some unknown reason some lines miss a period, add it
A__ : Any =[_add_missing_period(snake_case__ ) for line in nonempty_lines]
# gather article lines
A__ : List[str] =[]
A__ : Union[str, Any] =deque(snake_case__ )
while True:
try:
A__ : Tuple =lines.popleft()
if element.startswith("""@highlight""" ):
break
story_lines.append(snake_case__ )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
A__ : List[str] =list(filter(lambda __snake_case : not t.startswith("""@highlight""" ), snake_case__ ) )
return story_lines, summary_lines
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
A__ : Dict =['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
if line.startswith("""@highlight""" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __lowerCamelCase ( __snake_case : Dict, __snake_case : List[Any], __snake_case : List[Any] ) -> Dict:
"""simple docstring"""
if len(snake_case__ ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(snake_case__ )) )
return sequence
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
A__ : Optional[Any] =torch.ones_like(snake_case__ )
A__ : List[Any] =sequence == pad_token_id
A__ : List[str] =0
return mask
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : str, __snake_case : Optional[Any] ) -> List[Any]:
"""simple docstring"""
A__ : Dict =[tokenizer.encode(snake_case__ ) for line in story_lines]
A__ : Any =[token for sentence in story_lines_token_ids for token in sentence]
A__ : Optional[int] =[tokenizer.encode(snake_case__ ) for line in summary_lines]
A__ : Any =[token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __lowerCamelCase ( __snake_case : Any, __snake_case : Dict ) -> str:
"""simple docstring"""
A__ : Tuple =[]
for sequence in batch:
A__ : Optional[int] =-1
A__ : Optional[int] =[]
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(snake_case__ )
return torch.tensor(snake_case__ )
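# Hedged sketch (an addition, with illustrative names): the "@highlight" convention
# parsed above puts story lines first, and each "@highlight" marker introduces one
# summary line. A minimal standalone version of that split:
def _split_cnn_story_demo(raw):
    lines = [line.strip() for line in raw.split("\n") if line.strip()]
    story, summary, in_summary = [], [], False
    for line in lines:
        if line.startswith("@highlight"):
            in_summary = True
            continue
        (summary if in_summary else story).append(line)
    return story, summary
if __name__ == "__main__":
    print(_split_cnn_story_demo("First fact.\nSecond fact.\n@highlight\nShort summary."))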
| 365
|
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius ( n : int ) -> int:
    """simple docstring"""
    factors = prime_factors(n )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136
| 0
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ : Any = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Tuple , *lowercase_ : str , **lowercase_ : List[Any]):
'''simple docstring'''
super().__init__(*lowercase_ , **lowercase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING)
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : str=None , lowercase_ : Dict=None , lowercase_ : Optional[int]=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = {}
SCREAMING_SNAKE_CASE_ : Tuple = {}
if prompt is not None:
SCREAMING_SNAKE_CASE_ : Tuple = prompt
if generate_kwargs is not None:
SCREAMING_SNAKE_CASE_ : int = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Any , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Any):
'''simple docstring'''
return super().__call__(lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Optional[int] , lowercase_ : List[str]=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = load_image(lowercase_)
if prompt is not None:
if not isinstance(lowercase_ , lowercase_):
raise ValueError(
F'Received an invalid text input, got - {type(lowercase_)} - but expected a single string. '
'''Note also that one single text can be provided for conditional image to text generation.''')
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model.config.model_type
if model_type == "git":
SCREAMING_SNAKE_CASE_ : Dict = self.image_processor(images=lowercase_ , return_tensors=self.framework)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_).input_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.tokenizer.cls_token_id] + input_ids
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(lowercase_).unsqueeze(0)
model_inputs.update({'''input_ids''': input_ids})
elif model_type == "pix2struct":
SCREAMING_SNAKE_CASE_ : str = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
SCREAMING_SNAKE_CASE_ : int = self.image_processor(images=lowercase_ , return_tensors=self.framework)
SCREAMING_SNAKE_CASE_ : int = self.tokenizer(lowercase_ , return_tensors=self.framework)
model_inputs.update(lowercase_)
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation')
else:
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
SCREAMING_SNAKE_CASE_ : List[str] = None
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int]=None):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , lowercase_)
and all(x is None for x in model_inputs['''input_ids'''])
):
SCREAMING_SNAKE_CASE_ : List[str] = None
if generate_kwargs is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
SCREAMING_SNAKE_CASE_ : int = model_inputs.pop(self.model.main_input_name)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model.generate(lowercase_ , **lowercase_ , **lowercase_)
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
for output_ids in model_outputs:
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''generated_text''': self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , )
}
records.append(lowercase_)
return records
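# Hedged usage sketch (an addition): this class backs the "image-to-text" pipeline
# task in transformers; the checkpoint name below is illustrative, not prescribed
# by the code above.
if __name__ == "__main__":
    from transformers import pipeline
    captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
    print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))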
| 91
|
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle (data ) -> list[Any]:
    """simple docstring"""
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["""python""", """says""", """hello""", """!"""]
    print("""Fisher-Yates Shuffle:""")
    print("""List""", integers, strings)
    print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
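# Note (an addition): the loop above swaps two uniformly random positions per pass,
# which is a "random transpositions" shuffle. The textbook Fisher-Yates (Knuth)
# variant below is the one that is exactly uniform over permutations.
def knuth_shuffle(data):
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # inclusive upper bound, matching random.randint
        data[i], data[j] = data[j], data[i]
    return data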
| 91
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
_A : str = IFInpaintingSuperResolutionPipeline
_A : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
_A : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
_A : Dict = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Any=0 ) -> Dict:
"""simple docstring"""
        if str(__a ).startswith("""mps""" ):
            __lowercase : Union[str, Any] = torch.manual_seed(__a )
        else:
            __lowercase : int = torch.Generator(device=__a ).manual_seed(__a )
        __lowercase : str = floats_tensor((1, 3, 16, 16) , rng=random.Random(__a ) ).to(__a )
        __lowercase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        __lowercase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__lowercase : Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
self._test_save_load_local()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 356
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[str] = ['''pixel_values''']
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize so the shortest edge matches size["shortest_edge"], preserving aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
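if __name__ == "__main__":
    # Illustrative check added for this excerpt (not part of the original file):
    # the per-pixel arithmetic behind do_rescale + do_normalize, written out with
    # plain numpy. The mean/std values are the OPENAI_CLIP_MEAN / OPENAI_CLIP_STD
    # defaults the processor falls back to (assumed here, copied inline).
    import numpy as np

    mean = np.array([0.48145466, 0.4578275, 0.40821073])
    std = np.array([0.26862954, 0.26130258, 0.27577711])
    pixels = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
    pixels = pixels * (1 / 255)     # the rescale step
    pixels = (pixels - mean) / std  # the normalize step, broadcast over channels
    print(pixels.shape, pixels.dtype)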
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCamelCase : str = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
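if __name__ == "__main__":
    # Hedged usage sketch added for this excerpt: how the processor above is
    # typically driven. "laion/clap-htsat-unfused" is an assumed checkpoint id;
    # any CLAP checkpoint pairing this feature extractor with a Roberta
    # tokenizer should behave the same way.
    import numpy as np
    from transformers import ClapProcessor

    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    audio = np.random.randn(48_000).astype(np.float32)  # one second at 48 kHz
    inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
    print(sorted(inputs.keys()))  # input_ids / attention_mask from text, input_features from audio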
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    # dp[i][j] is the length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
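    # Added sanity check for this excerpt: the longest run shared by these two
    # strings is "abcd", found by the common-suffix DP above with ans_length == 4.
    assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"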
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Smallest index in v[l..r] whose value is >= key (v must be sorted)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] inside tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
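    # Added sanity check: the LIS of this classic sequence has length 6
    # (for example 2, 3, 7, 8, 10, 13), which is what the tail array yields.
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6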
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the json config
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
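# Example invocation added for this excerpt (the script name and file paths are
# placeholders, adjust to your checkout):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin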
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
"""simple docstring"""
__magic_name__ = [0, 2, 4, 6, 8]
__magic_name__ = [1, 3, 5, 7, 9]
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__SCREAMING_SNAKE_CASE = 0
for digit in range(10 ):
__SCREAMING_SNAKE_CASE = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , UpperCamelCase_ , UpperCamelCase_ )
return result
__SCREAMING_SNAKE_CASE = 0
for digita in range(10 ):
__SCREAMING_SNAKE_CASE = digita
if (remainder + digita) % 2 == 0:
__SCREAMING_SNAKE_CASE = ODD_DIGITS
else:
__SCREAMING_SNAKE_CASE = EVEN_DIGITS
for digita in other_parity_digits:
__SCREAMING_SNAKE_CASE = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , UpperCamelCase_ , UpperCamelCase_ , )
return result
def _lowerCAmelCase ( UpperCamelCase_ = 9 ):
__SCREAMING_SNAKE_CASE = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(UpperCamelCase_ , 0 , [0] * length , UpperCamelCase_ )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
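    # Added worked example: 36 is reversible because 36 + 63 = 99 and every digit
    # of 99 is odd; Project Euler 145 quotes 120 such numbers below one thousand.
    assert solution(3) == 120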
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCAmelCase : Union[str, Any] = TypeVar("T")
UpperCAmelCase : Dict = Union[List[T], Tuple[T, ...]]
UpperCAmelCase : int = Union[T, List[T], Dict[str, T]]
UpperCAmelCase : Tuple = Union[str, bytes, os.PathLike]
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized) 0/1 knapsack; `f` is a global DP table pre-filled with -1."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
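    # Added check for this excerpt: confirm the table-based answer against a
    # brute-force sweep over all 2**n subsets (cheap at n = 4).
    from itertools import combinations

    best = max(
        sum(val[i] for i in c)
        for r in range(n + 1)
        for c in combinations(range(n), r)
        if sum(wt[i] for i in c) <= w
    )
    assert best == optimal_solution == 8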
def remove_digit(num):
    """Return the largest number obtainable by deleting exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
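    # Added examples: dropping the single digit that hurts the least.
    assert remove_digit(152) == 52      # drop the leading 1
    assert remove_digit(2736) == 736    # drop the 2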
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
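if __name__ == "__main__":
    # Minimal sketch added for this excerpt (a simplification, not the real
    # _LazyModule): the lazy pattern this init relies on resolves a name to its
    # submodule only on first attribute access, keeping the package import cheap.
    import importlib
    import types

    class _LazyDemo(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            self._map = {obj: mod for mod, objs in import_structure.items() for obj in objs}

        def __getattr__(self, attr):
            module = importlib.import_module("." + self._map[attr], self.__name__)
            return getattr(module, attr)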
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : str=None ):
"""simple docstring"""
require_version(deps[pkg] , __UpperCamelCase )
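if __name__ == "__main__":
    # Added demonstration: the same helper can be invoked directly; the pin
    # below is illustrative only, the real pins live in `deps`.
    require_version("tqdm>=4.27", "Try: pip install -U tqdm")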
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = '''albert'''
def __init__( self : List[Any] , UpperCamelCase__ : List[Any]=30000 , UpperCamelCase__ : int=128 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Union[str, Any]=64 , UpperCamelCase__ : Any=16384 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Optional[int]="gelu_new" , UpperCamelCase__ : int=0 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Tuple=1E-12 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[Any]=3 , **UpperCamelCase__ : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =vocab_size
__UpperCamelCase =embedding_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_hidden_groups
__UpperCamelCase =num_attention_heads
__UpperCamelCase =inner_group_num
__UpperCamelCase =hidden_act
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =classifier_dropout_prob
__UpperCamelCase =position_embedding_type
class _lowercase ( __a ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCamelCase ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
A__ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A__ = DDPMScheduler()
A__ = AudioDiffusionPipeline(vqvae=lowercase , unet=self.dummy_unet , mel=lowercase , scheduler=lowercase )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A__ = torch.Generator(device=lowercase ).manual_seed(42 )
A__ = pipe(generator=lowercase , steps=4 )
A__ = output.audios[0]
A__ = output.images[0]
A__ = torch.Generator(device=lowercase ).manual_seed(42 )
A__ = pipe(generator=lowercase , steps=4 , return_dict=lowercase )
A__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A__ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
A__ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A__ = DDIMScheduler()
A__ = self.dummy_vqvae_and_unet
A__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowercase , scheduler=lowercase )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
A__ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A__ = torch.Generator(device=lowercase ).manual_seed(42 )
A__ = pipe(raw_audio=lowercase , generator=lowercase , start_step=5 , steps=10 )
A__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A__ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A__ = self.dummy_unet_condition
A__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowercase , mel=lowercase , scheduler=lowercase )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
A__ = torch.rand((1, 1, 10) )
A__ = pipe(generator=lowercase , encoding=lowercase )
A__ = output.images[0]
A__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A__ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = torch_device
A__ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A__ = torch.Generator(device=lowercase ).manual_seed(42 )
A__ = pipe(generator=lowercase )
A__ = output.audios[0]
A__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A__ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(R"^\s*try:")
# Catches a line with else:
_re_else = re.compile(R"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(R"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def snake_case ( snake_case__ :Dict , snake_case__ :int) -> List[Any]:
def find_duplicates(snake_case__ :Union[str, Any]):
return [k for k, v in collections.Counter(snake_case__).items() if v > 1]
if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
return ["Both sides of the init do not have the same backends!"]
_A = []
for key in import_dict_objects.keys():
_A = find_duplicates(import_dict_objects[key])
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''')
_A = find_duplicates(type_hint_objects[key])
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')
if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
_A = """base imports""" if key == """none""" else F'''{key} backend'''
errors.append(F'''Differences for {name}:''')
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''')
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''')
return errors
def snake_case ( ) -> int:
_A = []
for root, _, files in os.walk(snake_case__):
if "__init__.py" in files:
_A = os.path.join(snake_case__ , """__init__.py""")
_A = parse_init(snake_case__)
if objects is not None:
_A = analyze_results(*snake_case__)
if len(snake_case__) > 0:
_A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("""\n""".join(snake_case__))
if len(snake_case__) > 0:
raise ValueError("""\n\n""".join(snake_case__))
def snake_case ( ) -> Optional[Any]:
_A = []
for path, directories, files in os.walk(snake_case__):
for folder in directories:
# Ignore private modules
if folder.startswith("""_"""):
directories.remove(snake_case__)
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(snake_case__) / folder).glob("""*.py"""))) == 0:
continue
_A = str((Path(snake_case__) / folder).relative_to(snake_case__))
_A = short_path.replace(os.path.sep , """.""")
submodules.append(snake_case__)
for fname in files:
if fname == "__init__.py":
continue
_A = str((Path(snake_case__) / fname).relative_to(snake_case__))
_A = short_path.replace(""".py""" , """""").replace(os.path.sep , """.""")
if len(submodule.split(""".""")) == 1:
submodules.append(snake_case__)
return submodules
_SCREAMING_SNAKE_CASE = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def snake_case ( ) -> Union[str, Any]:
# This is to make sure the transformers module imported is the one in the repo.
_A = importlib.util.spec_from_file_location(
"""transformers""" , os.path.join(snake_case__ , """__init__.py""") , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
_A = spec.loader.load_module()
_A = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(snake_case__) > 0:
_A = """\n""".join(F'''- {module}''' for module in module_not_registered)
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
F'''{list_of_modules}\n'''
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""")
if __name__ == "__main__":
check_all_inits()
check_submodules()
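# For illustration only (not part of the original script): the layout this
# checker validates pairs a lazy `_import_structure` dict with a mirrored
# TYPE_CHECKING branch in each `__init__.py`. A minimal sketch, with
# hypothetical names (`modeling_foo`, `FooModel`):
#
#     _import_structure = {"modeling_foo": ["FooModel"]}
#
#     if TYPE_CHECKING:
#         from .modeling_foo import FooModel
#     else:
#         import sys
#         sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
#
# `analyze_results` reports an error if "FooModel" appears on one side but
# not the other, or if either side lists it twice.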
| 180
| 0
|
import random
def _partition(data, pivot) -> tuple:
    # Split `data` into elements less than, equal to, and greater than `pivot`.
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (the value at that index once items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index falls on the pivot
    if m <= index < m + count:
        return pivot
    # must be in the smaller partition
    elif m > index:
        return quick_select(smaller, index)
    # must be in the larger partition
    else:
        return quick_select(larger, index - (m + count))
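# A minimal usage sketch (added for illustration; the list below is made up):
# quick_select returns the value that would sit at `index` in sorted order.
if __name__ == "__main__":
    data = [2, 4, 5, 7, 899, 54, 32]
    # sorted(data) == [2, 4, 5, 7, 32, 54, 899]; index 3 is the median
    print(quick_select(data, len(data) // 2))  # expected: 7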
| 357
|
def z_function(input_str: str) -> list[int]:
    z_result = [0 for _ in range(len(input_str))]
    # initialize the match interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when the current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result extends the interval to the right,
        # update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and run z_function
    # on the combined string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern
        if val >= len(pattern):
            answer += 1
    return answer
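# A worked example (illustrative, not from the original module): "aba" occurs
# twice in "abacaba" (positions 0 and 4). z_function runs on "abaabacaba",
# where indices 3 and 7 reach z-values >= len("aba") == 3.
assert find_pattern("aba", "abacaba") == 2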
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288
| 0
|
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 requires a > d, and n > 0 requires a < 4 * d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
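# Derivation note (added; not in the original file): writing the arithmetic
# progression as x = a + d, y = a, z = a - d gives
#   x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4*d - a)
# so n = a * (4*d - a), and for each divisor a of n we get d = (a + n/a) / 4,
# which is exactly `common_difference` above before and after the division
# by 4. Positivity of z forces a > d, and n > 0 forces a < 4*d.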
if __name__ == "__main__":
print(F'{solution() = }')
| 156
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : List[str] = TextToVideoSDPipeline
__snake_case : int = TEXT_TO_IMAGE_PARAMS
__snake_case : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__snake_case : Dict = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=0 ) -> List[Any]:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """np"""
SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase__ ).frames
SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
SCREAMING_SNAKE_CASE = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=25 ,output_type="""pt""" ).frames
SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type="""pt""" ).frames
SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 296
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ : Tuple = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
a_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 6
|
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =VQModel
lowercase : Union[str, Any] ='sample'
@property
def lowercase__ ( self, lowerCAmelCase=(32, 32) ):
"""simple docstring"""
lowerCamelCase_ =4
lowerCamelCase_ =3
lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
return {"sample": image}
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowerCamelCase_ =self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )
model.to(lowerCAmelCase )
lowerCamelCase_ =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(lowerCAmelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size )
lowerCamelCase_ =image.to(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).sample
lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
| 6
| 1
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def A_ ( A__ ) -> Union[str, Any]:
a__ : List[str] = split_dict._to_yaml_list()
assert len(A__ ) == len(A__ )
a__ : List[Any] = SplitDict._from_yaml_list(A__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
a__ : Any = None
# the split name of split_dict takes over the name of the split info object
a__ : str = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=A__ ), SplitInfo(dataset_name='my_dataset' )] )
def A_ ( A__ ) -> Any:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
a__ : Any = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 99
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase=None ) -> Tuple:
if subparsers is not None:
lowerCamelCase__ : Any = subparsers.add_parser('test' )
else:
lowerCamelCase__ : int = argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=_UpperCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase )
return parser
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
lowerCamelCase__ : List[str] = script_name
else:
lowerCamelCase__ : List[Any] = F"""--config_file={args.config_file} {script_name}"""
lowerCamelCase__ : str = ['accelerate-launch'] + test_args.split()
lowerCamelCase__ : Dict = execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def SCREAMING_SNAKE_CASE ( ) -> Any:
lowerCamelCase__ : Any = test_command_parser()
lowerCamelCase__ : List[Any] = parser.parse_args()
test_command(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 50
| 0
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 319
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case = 16
snake_case = 32
def lowerCamelCase__ ( lowercase , lowercase = 16 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : List[Any] = datasets.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want lengths that are round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE : str = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE : Optional[Any] = 8
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
return tokenizer.pad(
lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
SCREAMING_SNAKE_CASE : Dict = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
snake_case = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1":
SCREAMING_SNAKE_CASE : int = 2
# New Code #
SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : Any = config["lr"]
SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] )
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" )
set_seed(lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
            # Gradient accumulation on TPUs is currently neither supported nor advised, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase ):
SCREAMING_SNAKE_CASE : Any = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = output.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowercase , references=lowercase , )
SCREAMING_SNAKE_CASE : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
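# Example invocation (illustrative; the script filename is a placeholder and
# the flags follow the argparse setup above):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16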
| 319
| 1
|
import os
from pathlib import Path
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : int ,__lowercase : int ):
'''simple docstring'''
A_ : List[str] = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
A_ : Dict = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
A_ : str = f'''{src_lang}-{tgt_lang}'''
A_ : Dict = f'''\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'''
os.makedirs(__lowercase ,exist_ok=__lowercase )
A_ : Optional[Any] = os.path.join(__lowercase ,'README.md' )
print(f'''Generating {path}''' )
with open(__lowercase ,'w' ,encoding='utf-8' ) as f:
f.write(__lowercase )
# make sure we are under the root of the project
_UpperCAmelCase = Path(__file__).resolve().parent.parent.parent
_UpperCAmelCase = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_UpperCAmelCase = model_name.split("""-""")
_UpperCAmelCase = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 140
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Tuple = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
_SCREAMING_SNAKE_CASE : List[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class A__ :
'''simple docstring'''
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: int = 6) -> None:
"""simple docstring"""
__lowerCAmelCase : Node | None = None
__lowerCAmelCase : Node | None = None
self.create_linked_list(_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: int) -> None:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = Node()
__lowerCAmelCase : str = current_node
__lowerCAmelCase : int = current_node
__lowerCAmelCase : Union[str, Any] = current_node
for _ in range(1 , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[str] = Node()
__lowerCAmelCase : int = current_node
__lowerCAmelCase : str = previous_node
__lowerCAmelCase : Optional[Any] = current_node
__lowerCAmelCase : List[Any] = self.front
__lowerCAmelCase : List[str] = previous_node
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> bool:
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Any | None:
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Any) -> None:
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
__lowerCAmelCase : Optional[int] = self.rear.next
if self.rear:
__lowerCAmelCase : Optional[int] = data
def _SCREAMING_SNAKE_CASE ( self: str) -> Any:
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
__lowerCAmelCase : str = self.front.data
__lowerCAmelCase : Tuple = None
return data
__lowerCAmelCase : List[Any] = self.front
__lowerCAmelCase : Tuple = old_front.next
__lowerCAmelCase : List[str] = old_front.data
__lowerCAmelCase : List[str] = None
return data
def _SCREAMING_SNAKE_CASE ( self: str) -> None:
"""simple docstring"""
if self.is_empty():
raise Exception("Empty Queue")
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> None:
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("Full Queue")
class A__ :
'''simple docstring'''
def __init__( self: Optional[Any]) -> None:
"""simple docstring"""
__lowerCAmelCase : Any | None = None
__lowerCAmelCase : Node | None = None
__lowerCAmelCase : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
|
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__snake_case : str = logging.get_logger(__name__)
# General docstring
__snake_case : Optional[int] = 'PoolFormerConfig'
# Base docstring
__snake_case : Any = 'sail/poolformer_s12'
__snake_case : Optional[Any] = [1, 512, 7, 7]
# Image classification docstring
__snake_case : List[Any] = 'sail/poolformer_s12'
__snake_case : Optional[Any] = 'tabby, tabby cat'
__snake_case : Union[str, Any] = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def _lowercase ( __snake_case ,__snake_case = 0.0 ,__snake_case = False ) -> Tuple:
if drop_prob == 0.0 or not training:
return input
__lowerCAmelCase : Optional[int] = 1 - drop_prob
__lowerCAmelCase : Union[str, Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
__lowerCAmelCase : List[str] = keep_prob + torch.rand(__snake_case ,dtype=input.dtype ,device=input.device )
random_tensor.floor_() # binarize
__lowerCAmelCase : Tuple = input.div(__snake_case ) * random_tensor
return output
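# Worked example (added for clarity; standard stochastic-depth behavior): with
# drop_prob=0.25 and training=True, keep_prob is 0.75; each sample's entire
# residual branch is zeroed with probability 0.25, and survivors are scaled by
# 1 / 0.75, so the expected value of the output matches the input.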
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[float] = None) -> None:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Dict = drop_prob
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: torch.Tensor) -> torch.Tensor:
"""simple docstring"""
return drop_path(_SCREAMING_SNAKE_CASE , self.drop_prob , self.training)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> str:
"""simple docstring"""
return "p={}".format(self.drop_prob)
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any=None) -> int:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Optional[int] = patch_size if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable) else (patch_size, patch_size)
__lowerCAmelCase : Any = stride if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable) else (stride, stride)
__lowerCAmelCase : Any = padding if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable) else (padding, padding)
__lowerCAmelCase : Optional[int] = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = norm_layer(_SCREAMING_SNAKE_CASE) if norm_layer else nn.Identity()
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : str = self.projection(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = self.norm(_SCREAMING_SNAKE_CASE)
return embeddings
class A__ ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: List[Any]) -> Tuple:
"""simple docstring"""
super().__init__(1 , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: int , _SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Dict = nn.AvgPoolad(_SCREAMING_SNAKE_CASE , stride=1 , padding=pool_size // 2 , count_include_pad=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Dict) -> Dict:
"""simple docstring"""
return self.pool(_SCREAMING_SNAKE_CASE) - hidden_states
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Dict = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1)
__lowerCAmelCase : Tuple = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1)
__lowerCAmelCase : Any = PoolFormerDropPath(_SCREAMING_SNAKE_CASE)
if isinstance(config.hidden_act , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[int] = ACTaFN[config.hidden_act]
else:
__lowerCAmelCase : int = config.hidden_act
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : int = self.conva(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = self.act_fn(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = self.drop(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = self.conva(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.drop(_SCREAMING_SNAKE_CASE)
return hidden_states
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : List[str] = PoolFormerPooling(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = PoolFormerOutput(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = PoolFormerGroupNorm(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = PoolFormerGroupNorm(_SCREAMING_SNAKE_CASE)
# Useful for training neural nets
__lowerCAmelCase : Optional[int] = PoolFormerDropPath(_SCREAMING_SNAKE_CASE) if drop_path > 0.0 else nn.Identity()
__lowerCAmelCase : Union[str, Any] = config.use_layer_scale
if config.use_layer_scale:
__lowerCAmelCase : List[Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_SCREAMING_SNAKE_CASE)) , requires_grad=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_SCREAMING_SNAKE_CASE)) , requires_grad=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]:
"""simple docstring"""
if self.use_layer_scale:
__lowerCAmelCase : int = self.pooling(self.before_norm(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : List[str] = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * pooling_output
# First residual connection
__lowerCAmelCase : Optional[Any] = hidden_states + self.drop_path(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = ()
__lowerCAmelCase : Union[str, Any] = self.output(self.after_norm(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : Dict = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * layer_output
# Second residual connection
__lowerCAmelCase : List[str] = hidden_states + self.drop_path(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = (output,) + outputs
return outputs
else:
__lowerCAmelCase : Optional[Any] = self.drop_path(self.pooling(self.before_norm(_SCREAMING_SNAKE_CASE)))
# First residual connection
__lowerCAmelCase : Optional[Any] = pooling_output + hidden_states
__lowerCAmelCase : List[Any] = ()
# Second residual connection inside the PoolFormerOutput block
__lowerCAmelCase : Any = self.drop_path(self.output(self.after_norm(_SCREAMING_SNAKE_CASE)))
__lowerCAmelCase : str = hidden_states + layer_output
__lowerCAmelCase : List[Any] = (output,) + outputs
return outputs
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Optional[int] = config
# stochastic depth decay rule
__lowerCAmelCase : Tuple = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths))]
# patch embeddings
__lowerCAmelCase : List[str] = []
for i in range(config.num_encoder_blocks):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ))
__lowerCAmelCase : Tuple = nn.ModuleList(_SCREAMING_SNAKE_CASE)
# Transformer blocks
__lowerCAmelCase : Union[str, Any] = []
__lowerCAmelCase : Any = 0
for i in range(config.num_encoder_blocks):
# each block consists of layers
__lowerCAmelCase : List[Any] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i]):
layers.append(
PoolFormerLayer(
_SCREAMING_SNAKE_CASE , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio) , drop_path=dpr[cur + j] , ))
blocks.append(nn.ModuleList(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : Union[str, Any] = nn.ModuleList(_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: Union[str, Any]=True) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Dict = () if output_hidden_states else None
__lowerCAmelCase : Union[str, Any] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block)):
__lowerCAmelCase , __lowerCAmelCase : str = layers
# Get patch embeddings from hidden_states
__lowerCAmelCase : str = embedding_layer(_SCREAMING_SNAKE_CASE)
# Send the embeddings through the blocks
for _, blk in enumerate(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : int = blk(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = layer_outputs[0]
if output_hidden_states:
__lowerCAmelCase : int = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=_SCREAMING_SNAKE_CASE , hidden_states=_SCREAMING_SNAKE_CASE)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = PoolFormerConfig
SCREAMING_SNAKE_CASE = 'poolformer'
SCREAMING_SNAKE_CASE = 'pixel_values'
SCREAMING_SNAKE_CASE = True
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: List[Any]) -> List[str]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_SCREAMING_SNAKE_CASE , nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]=False) -> Dict:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[Any] = value
__snake_case : Union[str, Any] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__snake_case : str = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , __SCREAMING_SNAKE_CASE , )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: Optional[int]) -> Any:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = config
__lowerCAmelCase : Any = PoolFormerEncoder(_SCREAMING_SNAKE_CASE)
# Initialize weights and apply final processing
self.post_init()
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[torch.FloatTensor] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
"""simple docstring"""
__lowerCAmelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
__lowerCAmelCase : Union[str, Any] = self.encoder(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , )
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Tuple) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , config.hidden_size)
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.dense(_SCREAMING_SNAKE_CASE)
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , __SCREAMING_SNAKE_CASE , )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Dict:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = config.num_labels
__lowerCAmelCase : Tuple = PoolFormerModel(_SCREAMING_SNAKE_CASE)
# Final norm
__lowerCAmelCase : Optional[Any] = PoolFormerGroupNorm(config.hidden_sizes[-1])
# Classifier head
__lowerCAmelCase : Any = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[torch.FloatTensor] = None , _SCREAMING_SNAKE_CASE: Optional[torch.LongTensor] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
__lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase : Union[str, Any] = self.poolformer(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = outputs[0]
__lowerCAmelCase : Optional[int] = self.classifier(self.norm(_SCREAMING_SNAKE_CASE).mean([-2, -1]))
__lowerCAmelCase : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowerCAmelCase : int = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowerCAmelCase : List[Any] = "single_label_classification"
else:
__lowerCAmelCase : Union[str, Any] = "multi_label_classification"
if self.config.problem_type == "regression":
__lowerCAmelCase : Dict = MSELoss()
if self.num_labels == 1:
__lowerCAmelCase : Optional[int] = loss_fct(logits.squeeze() , labels.squeeze())
else:
__lowerCAmelCase : int = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
elif self.config.problem_type == "single_label_classification":
__lowerCAmelCase : int = CrossEntropyLoss()
__lowerCAmelCase : str = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__lowerCAmelCase : Union[str, Any] = BCEWithLogitsLoss()
__lowerCAmelCase : Optional[int] = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
if not return_dict:
__lowerCAmelCase : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states)
| 58
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
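# The "*" in the mapped keys above is a per-layer wildcard: before a tensor is
# copied, it is replaced by the encoder-layer index parsed out of the fairseq
# parameter name (see recursively_load_weights below).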
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` through the HF model, validate the shape, then copy the tensor."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
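# Illustrative example (assumed tensor name): a fairseq parameter stored as
# "encoder.layers.3.self_attn.linear_q.weight" matches the MAPPING entry
# "self_attn.linear_q", so it ends up copied via
# set_recursively(hf_model, "wav2vec2_conformer.encoder.layers.3.self_attn.linear_q", value, name, "weight").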
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Map every tensor of the fairseq state dict onto the HF Wav2Vec2Conformer model."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a feature-extractor conv/layer-norm tensor, validating its shape first."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq Wav2Vec2-Conformer checkpoint into the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
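# Example invocation (script file name and paths are illustrative placeholders):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/conformer_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf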
| 231
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 288
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)
    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000,
        )
        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000,
        )
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
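    # With pil_image=True the dummy tensor is rescaled from [-1, 1] to [0, 1]
    # and converted to a PIL image, so the tests below exercise both input types.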
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_image(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents, image_embeddings=image_embeddings,
        ).images

        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )
    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image, generator=generator, output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
| 267
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily make change for `value` using the given denominations."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer
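# Note: this greedy strategy is only guaranteed to be optimal for "canonical"
# coin systems such as the Indian denominations below. For an arbitrary system
# it can fail: with denominations [1, 3, 4] and value 6 it returns [4, 1, 1]
# (three coins) although [3, 3] (two coins) is optimal.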
# Driver Code
if __name__ == "__main__":
UpperCAmelCase = []
UpperCAmelCase = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCAmelCase = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
UpperCAmelCase = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
UpperCAmelCase = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(f'''Following is minimal change for {value}: ''')
UpperCAmelCase = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 267
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
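# Replacing sys.modules[__name__] with a _LazyModule defers the heavy framework
# imports (torch/tf/flax) until one of the exported symbols is first accessed.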
| 6
|
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (1.0 = identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            # a character only counts as matched within a +/- limit window
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
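# Expected output of the call above (computed by hand: only "l" matches within
# the search window, so jaro = (1/5 + 1/5 + 1/1) / 3): 0.4666666666666666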
| 6
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
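# Why the slicing above works: timm stores the fused qkv projection as a single
# (3 * hidden_size, hidden_size) matrix, so rows [:h] are the query, [h:2h] the
# key, and the last h rows the value projection.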
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """
    Copy/paste/tweak the original DPT weights to our DPT structure.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 307
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 307
| 1
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 319
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    """Build the TF-name -> HF-name mapping for all EfficientNet parameters."""
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
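# Note on the permutations above: TF stores conv kernels channels-last as
# (kH, kW, in, out) while PyTorch expects (out, in, kH, kW); depthwise kernels
# are laid out as (kH, kW, in, multiplier) in TF, hence the different permute order.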
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak the original EfficientNet weights to our EfficientNet structure.
    """
    # Load original model
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
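# Example invocation (illustrative; the script file name is assumed, and TF plus
# the reference `efficientnet` Keras models must be installed):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model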
| 319
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
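# Usage sketch (assuming this is the transformers TrOCR subpackage __init__):
# with the _LazyModule installed in sys.modules, the heavy submodule is only
# imported on first attribute access, e.g.
#
#   from transformers.models.trocr import TrOCRProcessor  # triggers the lazy import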
| 358
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f'dummy_{short_names.get(backend, backend)}_objects.py')
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main '
                    """__init__ has new objects.""")
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    """The main __init__ has objects that are not present in """
                    f'diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` '
                    """to fix this.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
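# Illustrative output (a sketch of what `create_dummy_files()` emits for the
# "torch" backend; `SomePipeline` is a hypothetical object name):
#
#   # This file is autogenerated by the command `make fix-copies`, do not edit.
#   from ..utils import DummyObject, requires_backends
#
#   class SomePipeline(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])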
| 49
| 0
|
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
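# Usage sketch (illustrative):
#
#   @experimental
#   def new_feature():
#       return 42
#
#   new_feature()  # emits UserWarning: "'new_feature' is experimental and might be subject to breaking changes..."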
| 58
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SAMPLE_TEXT = """Hello world! cécé herlolip"""
BertAbsConfig = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=""".""",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="""bert""",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(checkpoints, torch.device("""cpu"""), config)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device("""cpu"""))
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info("""convert the model""")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())
    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------
    logging.info("""Make sure that the models' outputs are identical""")
    tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""")
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("""This is sample éàalj'-.""")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("""This is sample 3 éàalj'-.""")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, segs, tgt, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask)[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("""Maximum absolute difference between the models' outputs: {:.2f}""".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("""Maximum absolute difference between the generators' outputs: {:.2f}""".format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("""all weights are equal up to 1e-3""")
    else:
        raise ValueError("""the weights are different. The new model is likely different from the original one.""")
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("""saving the model's state dictionary""")
    torch.save(
        new_model.state_dict(), """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 58
| 1
|
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
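# Worked example (illustrative): the mass action law gives n_i = sqrt(n * p),
# so with the intrinsic concentration as the unknown (passed as 0):
#
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   # -> ('intrinsic_conc', 50.0)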
| 102
|
UpperCamelCase__ = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 102
| 1
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` keeps working
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
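# Note (illustrative): `deprecate(...)` runs at module import time, so merely
# importing this shim emits the message above; the re-exported
# FlaxStableDiffusionControlNetPipeline itself is unchanged.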
| 267
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("""DownBlock2D""", """AttnDownBlock2D"""),
            up_block_types=("""AttnUpBlock2D""", """UpBlock2D"""),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="""numpy""").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="""numpy""", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_inference_cifar10(self):
        model_id = """google/ddpm-cifar10-32"""
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="""numpy""").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 267
| 1
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Zero-shot object detection pipeline; detects objects matching free-text candidate labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.')
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries')
        if isinstance(image, (str, Image.Image)):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs['image'])
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(',')
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop('target_size')
        candidate_label = model_inputs.pop('candidate_label')
        is_last = model_inputs.pop('is_last')
        outputs = self.model(**model_inputs)
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['target_size'])[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0])
                result = {'score': score, 'label': label, 'box': box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box):
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
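# Usage sketch (hypothetical session; "google/owlvit-base-patch32" is one known
# zero-shot detection checkpoint, but any compatible model works):
#
#   from transformers import pipeline
#
#   detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]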
| 357
|
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search to order the graph's vertices by finish time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search on the reversed graph to collect one component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the list of strongly connected components (Kosaraju's algorithm)."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
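if __name__ == "__main__":
    # Illustrative check (not in the original module): run Kosaraju's algorithm
    # on the sample graphs defined at the top of the file.
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))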
| 309
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        """OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """OPTForCausalLM""",
        """OPTModel""",
        """OPTPreTrainedModel""",
        """OPTForSequenceClassification""",
        """OPTForQuestionAnswering""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        """FlaxOPTForCausalLM""",
        """FlaxOPTModel""",
        """FlaxOPTPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
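# Quick sanity sketch (illustrative, not part of the module): the cosine
# schedule yields monotonically increasing betas, each clipped at max_beta.
#
#   betas = betas_for_alpha_bar(1000)
#   assert betas.shape == (1000,)
#   assert betas[0] < betas[-1] <= 0.999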
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler implementing Heun's (second-order) method for discrete beta schedules."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='cosine')
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='exp')
        else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''')
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
def lowerCAmelCase_ ( self: str , UpperCamelCase: int , UpperCamelCase: Optional[int]=None ) -> str:
if schedule_timesteps is None:
snake_case__ = self.timesteps
snake_case__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
snake_case__ = 1 if len(UpperCamelCase ) > 1 else 0
else:
snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep
snake_case__ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
snake_case__ = self.index_for_timestep(UpperCamelCase )
snake_case__ = self.sigmas[step_index]
snake_case__ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: int , UpperCamelCase: Union[str, torch.device] = None , UpperCamelCase: Optional[int] = None , ) -> str:
snake_case__ = num_inference_steps
snake_case__ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
snake_case__ = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase , dtype=UpperCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
snake_case__ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case__ = (np.arange(0 , UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
snake_case__ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case__ = (np.arange(UpperCamelCase , 0 , -step_ratio )).round().copy().astype(UpperCamelCase )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
snake_case__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
snake_case__ = np.log(UpperCamelCase )
snake_case__ = np.interp(UpperCamelCase , np.arange(0 , len(UpperCamelCase ) ) , UpperCamelCase )
if self.config.use_karras_sigmas:
snake_case__ = self._convert_to_karras(in_sigmas=UpperCamelCase , num_inference_steps=self.num_inference_steps )
snake_case__ = np.array([self._sigma_to_t(UpperCamelCase , UpperCamelCase ) for sigma in sigmas] )
snake_case__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
snake_case__ = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase )
snake_case__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
snake_case__ = torch.from_numpy(UpperCamelCase )
snake_case__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(UpperCamelCase ).startswith('mps' ):
# mps does not support float64
snake_case__ = timesteps.to(UpperCamelCase , dtype=torch.floataa )
else:
snake_case__ = timesteps.to(device=UpperCamelCase )
# empty dt and derivative
snake_case__ = None
snake_case__ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
snake_case__ = defaultdict(UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Dict ) -> Tuple:
# get log sigma
snake_case__ = np.log(UpperCamelCase )
# get distribution
snake_case__ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
snake_case__ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
snake_case__ = low_idx + 1
snake_case__ = log_sigmas[low_idx]
snake_case__ = log_sigmas[high_idx]
# interpolate sigmas
snake_case__ = (low - log_sigma) / (low - high)
snake_case__ = np.clip(UpperCamelCase , 0 , 1 )
# transform interpolation to time range
snake_case__ = (1 - w) * low_idx + w * high_idx
snake_case__ = t.reshape(sigma.shape )
return t
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Dict ) -> torch.FloatTensor:
snake_case__ = in_sigmas[-1].item()
snake_case__ = in_sigmas[0].item()
snake_case__ = 7.0 # 7.0 is the value used in the paper
snake_case__ = np.linspace(0 , 1 , UpperCamelCase )
snake_case__ = sigma_min ** (1 / rho)
snake_case__ = sigma_max ** (1 / rho)
snake_case__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]:
return self.dt is None
def lowerCAmelCase_ ( self: int , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: Union[float, torch.FloatTensor] , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: bool = True , ) -> Union[SchedulerOutput, Tuple]:
snake_case__ = self.index_for_timestep(UpperCamelCase )
# advance index counter by 1
snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
snake_case__ = self.sigmas[step_index]
snake_case__ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
snake_case__ = self.sigmas[step_index - 1]
snake_case__ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
snake_case__ = 0
snake_case__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
snake_case__ = sigma_hat if self.state_in_first_order else sigma_next
snake_case__ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
snake_case__ = sigma_hat if self.state_in_first_order else sigma_next
snake_case__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
snake_case__ = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
snake_case__ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
snake_case__ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
snake_case__ = sigma_next - sigma_hat
# store for 2nd order step
snake_case__ = derivative
snake_case__ = dt
snake_case__ = sample
else:
# 2. 2nd order / Heun's method
snake_case__ = (sample - pred_original_sample) / sigma_next
snake_case__ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
snake_case__ = self.dt
snake_case__ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
snake_case__ = None
snake_case__ = None
snake_case__ = None
snake_case__ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase )
def lowerCAmelCase_ ( self: Any , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
snake_case__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase ):
# mps does not support float64
snake_case__ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
snake_case__ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
snake_case__ = self.timesteps.to(original_samples.device )
snake_case__ = timesteps.to(original_samples.device )
snake_case__ = [self.index_for_timestep(UpperCamelCase , UpperCamelCase ) for t in timesteps]
snake_case__ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
snake_case__ = sigma.unsqueeze(-1 )
snake_case__ = original_samples + noise * sigma
return noisy_samples
def __len__( self: List[Any] ) -> Union[str, Any]:
return self.config.num_train_timesteps
| 307
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """Build the list of (original_name, hf_name) parameter renamings."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split the fused qkv projection into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the classification head weights (not used by the base model)."""
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    """Drop the self-supervised projection-head weights from the MSN checkpoint."""
    ignore_keys = [
        '''module.fc.fc1.weight''',
        '''module.fc.fc1.bias''',
        '''module.fc.bn1.weight''',
        '''module.fc.bn1.bias''',
        '''module.fc.bn1.running_mean''',
        '''module.fc.bn1.running_var''',
        '''module.fc.bn1.num_batches_tracked''',
        '''module.fc.fc2.weight''',
        '''module.fc.fc2.bias''',
        '''module.fc.bn2.weight''',
        '''module.fc.bn2.bias''',
        '''module.fc.bn2.running_mean''',
        '''module.fc.bn2.running_var''',
        '''module.fc.bn2.num_batches_tracked''',
        '''module.fc.fc3.weight''',
        '''module.fc.fc3.bias''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`."""
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = '''datasets/huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''target_encoder''']
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors='''pt''')
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(F'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
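# Example invocation (illustrative; the script file name is assumed):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small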
| 213
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Dict = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 213
| 1
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(F"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha0 in my_alphas:
        for cha1 in my_alphas:
            sequence = cha0 + cha1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(F"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(F"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and two-character sequences in `text`."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
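# Worked micro-example (illustrative): analyze_text("aab") yields
#   single_char_strings == {'b': 1, 'a': 2}   (last char, then the loop over text[:-1])
#   two_char_strings   == {' a': 1, 'aa': 1, 'ab': 1}
# and the probabilities feeding the entropy sums come straight from these counts.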
| 73
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _A ( __UpperCAmelCase ):
def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = eval_examples
__a = post_process_function
    def evaluate( self : Dict , eval_dataset : Optional[Dataset] = None , eval_examples : List[Any]=None , ignore_keys : Optional[List[str]] = None , metric_key_prefix : str = "eval" , **gen_kwargs : Any , ):
        '''simple docstring'''
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs['''max_length'''] = (
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
        )
        gen_kwargs['''num_beams'''] = (
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F'{metric_key_prefix}_'):
                    metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics)
        return metrics
    def predict( self : Tuple , predict_dataset : Any , predict_examples : int , ignore_keys : Tuple=None , metric_key_prefix : str = "test" , **gen_kwargs : Dict):
        '''simple docstring'''
        gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output , '''predict''')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F'{metric_key_prefix}_'):
                metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics)
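# A wiring sketch for the trainer subclass above (model, datasets and helper
# names here are illustrative, not defined in this file):
#
#     trainer = _A(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=128, num_beams=4)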
| 49
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
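# A quick sketch of the lazy behavior (assuming the installed package path is
# transformers.models.byt5): importing the submodule is cheap, and the
# tokenizer class is only materialized on first attribute access.
import importlib

byt5 = importlib.import_module("transformers.models.byt5")
ByT5Tokenizer = getattr(byt5, "ByT5Tokenizer")  # triggers the real import here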
| 360
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.0_001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ):
        """simple docstring"""
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_types( self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_with_v_prediction( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
            assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
            assert abs(result_mean.item() - 0.0_002 ) < 1e-3

    def test_full_loop_no_noise( self ):
        """simple docstring"""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3

    def test_full_loop_device( self ):
        """simple docstring"""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith("""cpu""" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
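# A standalone sketch of driving the scheduler outside the test harness
# (upstream this class corresponds to diffusers' KDPM2DiscreteScheduler;
# argument values are illustrative):
scheduler = KDPMaDiscreteScheduler(num_train_timesteps=1_000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)
print(scheduler.init_noise_sigma)  # sigma used to scale the initial latents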
| 168
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetVaConfig ( PretrainedConfig ):
'''simple docstring'''
    model_type ='mobilenet_v1'
    def __init__(self , num_channels=3 , image_size=2_24 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetVaOnnxConfig ( OnnxConfig ):
'''simple docstring'''
    torch_onnx_minimum_version =version.parse('1.11' )

    @property
    def inputs(self ):
        '''simple docstring'''
        return OrderedDict([('''pixel_values''', {0: '''batch'''})] )

    @property
    def outputs(self ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})] )
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )

    @property
    def atol_for_validation(self ):
        '''simple docstring'''
        return 1E-4
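# A quick usage sketch of the config class above (values illustrative):
config = MobileNetVaConfig(depth_multiplier=0.75, image_size=1_92)
print(config.model_type, config.image_size)  # mobilenet_v1 192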
| 102
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The model checkpoint for weights initialization. Leave None if you want to train a model from'
                ' scratch.'
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)}, )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )

@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    train_data_file: Optional[str] = field(
        default=None, metadata={'help': 'The input training data file (a text file).'} )
    train_data_files: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The input training data files (multiple files in glob format). '
                'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
            )
        }, )
    eval_data_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'}, )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'}, )
    line_by_line: bool = field(
        default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'}, )
    mlm: bool = field(
        default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    whole_word_mask: bool = field(default=False, metadata={'help': 'Whether or not to use whole word mask.'} )
    mlm_probability: float = field(
        default=0.1_5, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
    plm_probability: float = field(
        default=1 / 6, metadata={
            'help': (
                'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
                ' modeling.'
            )
        }, )
    max_span_length: int = field(
        default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
    block_size: int = field(
        default=-1, metadata={
            'help': (
                'Optional input sequence length after tokenization.'
                'The training dataset will be truncated in block of this size for training.'
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def get_dataset( args: DataTrainingArguments , tokenizer: PreTrainedTokenizer , evaluate: bool = False , cache_dir: Optional[str] = None , ) ->Any:
    """simple docstring"""
    def _dataset(file_path , ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def main() ->None:
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__snake_case : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__snake_case : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__snake_case : Tuple = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__snake_case : Dict = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__snake_case : List[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it,and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__snake_case : int = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__snake_case : List[Any] = AutoModelWithLMHead.from_config(_snake_case )
model.resize_token_embeddings(len(_snake_case ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__snake_case : List[str] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__snake_case : Optional[int] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__snake_case : Optional[Any] = (
get_dataset(_snake_case , tokenizer=_snake_case , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__snake_case : Any = (
get_dataset(_snake_case , tokenizer=_snake_case , evaluate=_snake_case , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__snake_case : List[Any] = DataCollatorForPermutationLanguageModeling(
tokenizer=_snake_case , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__snake_case : Optional[Any] = DataCollatorForWholeWordMask(
tokenizer=_snake_case , mlm_probability=data_args.mlm_probability )
else:
__snake_case : Union[str, Any] = DataCollatorForLanguageModeling(
tokenizer=_snake_case , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__snake_case : Optional[int] = Trainer(
model=_snake_case , args=_snake_case , data_collator=_snake_case , train_dataset=_snake_case , eval_dataset=_snake_case , prediction_loss_only=_snake_case , )
# Training
if training_args.do_train:
__snake_case : Dict = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_snake_case )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__snake_case : int = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Dict = math.exp(eval_output['''eval_loss'''] )
__snake_case : List[Any] = {'''perplexity''': perplexity}
__snake_case : str = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(_snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _snake_case , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(_snake_case )
return results
def _mp_fn( index: Optional[int] ) ->None:
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 102
| 1
|
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend( line ):
    '''simple docstring'''
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    '''simple docstring'''
    with open(os.path.join(PATH_TO_DIFFUSERS, '''__init__.py''' ), '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('''else:''' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name, backend_name ):
    '''simple docstring'''
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name )
    else:
        return DUMMY_CLASS.format(name, backend_name )
def create_dummy_files( backend_specific_objects=None ):
    '''simple docstring'''
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '''[''' + ''', '''.join(f"\"{b}\"" for b in backend.split('''_and_''' ) ) + ''']'''
        dummy_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ):
    '''simple docstring'''
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'''torch''': '''pt'''}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, '''utils''' )
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend )}_objects.py" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''''''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend )}_objects.py as the main "
                    '''__init__ has new objects.''' )
                with open(dummy_file_paths[backend], '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    f"diffusers.utils.dummy_{short_names.get(backend, backend )}_objects.py. Run `make fix-copies` "
                    '''to fix this.''' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
a_ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
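# For illustration (a sketch using the helpers above, with a hypothetical
# torch-only class name): the generated dummy is a DummyObject subclass whose
# constructor raises unless torch is installed.
print(create_dummy_object('StableDiffusionPipeline', '["torch"]'))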
| 222
|
'''simple docstring'''
def solution( limit=2_8_1_2_3 ):
    '''simple docstring'''
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
if __name__ == "__main__":
print(solution())
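# Sanity checks (a sketch): 12 is the smallest abundant number
# (1 + 2 + 3 + 4 + 6 = 16 > 12), so 24 = 12 + 12 is the first integer the
# sieve excludes; every n <= 23 is kept and 1 + 2 + ... + 23 = 276.
print(solution(23))  # 276
print(solution(24))  # still 276, since 24 itself drops out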
| 222
| 1
|
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort( start , visited , sort ):
    '''simple docstring'''
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
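    # For the graph above, the DFS appends each vertex after its children, so
    # every edge u -> v has v before u (a reversed topological order):
    assert sort == ['c', 'd', 'e', 'b', 'a']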
| 29
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return ViTMSNConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTMSNModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        print(f"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
        print(f"""Labels: {labels}""" )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMSNConfig , has_text_modality=False , hidden_size=3_7 )

    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass

    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest (unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head( self ):
        torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 309
| 0
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout( tmpdir )-> None:
    lock1 = FileLock(str(tmpdir / 'foo.lock' ) )
    lock2 = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.0_1
    with lock1.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock2.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_filenames_are_truncated( tmpdir )-> None:
    filename = 'a' * 10_00 + '.lock'
    lock1 = FileLock(str(tmpdir / filename ) )
    assert lock1._lock_file.endswith('.lock' )
    assert not lock1._lock_file.endswith(filename )
    assert len(os.path.basename(lock1._lock_file ) ) <= 2_55
    lock2 = FileLock(tmpdir / filename )
    with lock1.acquire():
        with pytest.raises(Timeout ):
            lock2.acquire(0 )
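# Minimal usage sketch of FileLock (the path is illustrative): acquire()
# doubles as a context manager and releases the lock on exit.
lock = FileLock("/tmp/my_resource.lock")
with lock.acquire(timeout=5):
    pass  # critical section: only one process at a time gets here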
| 260
|
from __future__ import annotations
solution = []
def is_safe( board: list[list[int]] , row: int , column: int )-> bool:
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve( board: list[list[int]] , row: int )-> bool:
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard( board: list[list[int]] )-> None:
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print('Q' , end=' ' )
            else:
                print('.' , end=' ' )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
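# A quick check (a sketch, reusing the helpers above): the 4-queens board has
# exactly 2 distinct solutions, and the classic 8-queens count is 92.
solution.clear()
small_board = [[0 for i in range(4)] for j in range(4)]
solve(small_board, 0)
print(len(solution))  # 2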
| 260
| 1
|
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A =6_37_81_37.0
AXIS_B =6_35_67_52.31_42_45
RADIUS =637_8137
def haversine_distance( lat1: float , lon1: float , lat2: float , lon2: float ):
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_1 = radians(lon1 )
    lambda_2 = radians(lon2 )
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
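# Usage sketch (coordinates illustrative): San Francisco to Yosemite Valley
# comes out to roughly 254 km (about 254_000 meters) with this formula.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(haversine_distance(*SAN_FRANCISCO, *YOSEMITE))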
| 213
|
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa :
def __init__( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
        self.round_constants = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data ) -> bytes:
        '''simple docstring'''
        padding = B'\x80' + (B'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' ,(len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ) -> None:
        '''simple docstring'''
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 ,len(self.preprocessed_data ) ,64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' ,block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a , b , c , d , e , f , g , h = self.hashes
            for index in range(0 ,64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 15] ,7 )
                        ^ self.ror(words[index - 15] ,18 )
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] ,17 )
                        ^ self.ror(words[index - 2] ,19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0X1_0_0_0_0_0_0_0_0
                # Compression
                sa = self.ror(e ,6 ) ^ self.ror(e ,11 ) ^ self.ror(e ,25 )
                ch = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0X1_0_0_0_0_0_0_0_0
                sb = self.ror(a ,2 ) ^ self.ror(a ,13 ) ^ self.ror(a ,22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sb + maj) % 0X1_0_0_0_0_0_0_0_0
                h , g , f , e , d , c , b , a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0X1_0_0_0_0_0_0_0_0),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self ,value ,rotations ) -> int:
        '''simple docstring'''
        return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaTest ( unittest.TestCase ):
    def test_match_hashes( self ) -> None:
        '''simple docstring'''
        import hashlib

        msg = bytes('Test String' ,'utf-8' )
        self.assertEqual(SHAaaa(msg ).hash ,hashlib.sha256(msg ).hexdigest() )
def main():
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
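# A quick sanity check (assuming the class above is correct): the SHA-256
# digest of b"abc" is the standard test vector starting with "ba7816bf".
print(SHAaaa(b'abc' ).hash[:8] )  # ba7816bf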
| 213
| 1
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError ( RuntimeError ):
    pass
def gen( shards: List[str] ) -> List[str]:
    """simple docstring"""
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main( ) -> None:
    """simple docstring"""
    rank = int(os.environ["""RANK"""] )
    world_size = int(os.environ["""WORLD_SIZE"""] )
    parser = ArgumentParser()
    parser.add_argument("""--streaming""" , type=bool )
    parser.add_argument("""--local_rank""" , type=int )
    parser.add_argument("""--num_workers""" , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"""shards""": [f"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
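# Standalone sketch of split_dataset_by_node outside the harness above: a
# 12-row map-style dataset split across 4 ranks gives 3 rows per rank.
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_dict({"x": list(range(12))})
print(len(split_dataset_by_node(ds, rank=0, world_size=4)))  # 3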
| 204
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 204
| 1
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1_088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer ( nn.Module):
    def __init__( self : Optional[Any] , in_channels : int , out_channels : int , kernel_size : int = 3 , stride : int = 1 , groups : int = 1 , activation : Optional[str] = "relu" , ) -> None:
        super().__init__()
        self.convolution = nn.Convad(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , groups=groups , bias=False , )
        self.normalization = nn.BatchNormad(out_channels )
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward( self : Union[str, Any] , hidden_state : Tensor ) -> Tensor:
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetEmbeddings ( nn.Module):
    def __init__( self : Tuple , config : RegNetConfig ) -> None:
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
        self.num_channels = config.num_channels

    def forward( self : List[Any] , pixel_values : Tensor ) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class RegNetShortCut ( nn.Module):
def __init__( self : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] = 2 ) -> str:
super().__init__()
_UpperCamelCase = nn.Convad(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , stride=__UpperCamelCase , bias=__UpperCamelCase )
_UpperCamelCase = nn.BatchNormad(__UpperCamelCase )
def _UpperCamelCase ( self : str , __UpperCamelCase : Optional[Any] ) -> Tensor:
_UpperCamelCase = self.convolution(__UpperCamelCase )
_UpperCamelCase = self.normalization(__UpperCamelCase )
return hidden_state
class RegNetSELayer ( nn.Module):
def __init__( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ) -> Any:
super().__init__()
_UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
_UpperCamelCase = nn.Sequential(
nn.Convad(__UpperCamelCase , __UpperCamelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(__UpperCamelCase , __UpperCamelCase , kernel_size=1 ) , nn.Sigmoid() , )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : Union[str, Any] ) -> Any:
# b c h w -> b c 1 1
_UpperCamelCase = self.pooler(__UpperCamelCase )
_UpperCamelCase = self.attention(__UpperCamelCase )
_UpperCamelCase = hidden_state * attention
return hidden_state
class RegNetXLayer ( nn.Module):
def __init__( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] = 1 ) -> int:
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
RegNetShortCut(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase , groups=__UpperCamelCase , activation=config.hidden_act ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=__UpperCamelCase ) , )
_UpperCamelCase = ACTaFN[config.hidden_act]
def _UpperCamelCase ( self : Any , __UpperCamelCase : Any ) -> Optional[int]:
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(__UpperCamelCase )
_UpperCamelCase = self.shortcut(__UpperCamelCase )
hidden_state += residual
_UpperCamelCase = self.activation(__UpperCamelCase )
return hidden_state
class RegNetYLayer ( nn.Module):
def __init__( self : int , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : str = 1 ) -> int:
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
RegNetShortCut(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase , groups=__UpperCamelCase , activation=config.hidden_act ) , RegNetSELayer(__UpperCamelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=__UpperCamelCase ) , )
_UpperCamelCase = ACTaFN[config.hidden_act]
def _UpperCamelCase ( self : Any , __UpperCamelCase : Union[str, Any] ) -> Tuple:
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(__UpperCamelCase )
_UpperCamelCase = self.shortcut(__UpperCamelCase )
hidden_state += residual
_UpperCamelCase = self.activation(__UpperCamelCase )
return hidden_state
class RegNetStage ( nn.Module):
def __init__( self : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : str = 2 , __UpperCamelCase : Tuple = 2 , ) -> Union[str, Any]:
super().__init__()
_UpperCamelCase = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
_UpperCamelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase , ) , *[layer(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for _ in range(depth - 1 )] , )
def _UpperCamelCase ( self : int , __UpperCamelCase : str ) -> List[Any]:
_UpperCamelCase = self.layers(__UpperCamelCase )
return hidden_state
class RegNetEncoder ( nn.Module):
def __init__( self : List[Any] , __UpperCamelCase : List[str] ) -> Tuple:
super().__init__()
_UpperCamelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
__UpperCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__UpperCamelCase , config.depths[1:] ):
self.stages.append(RegNetStage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , depth=__UpperCamelCase ) )
def _UpperCamelCase ( self : int , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] = False , __UpperCamelCase : Any = True ) -> BaseModelOutputWithNoAttention:
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(__UpperCamelCase )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__UpperCamelCase , hidden_states=__UpperCamelCase )
class RegNetPreTrainedModel ( PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = '''regnet'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True
    def _init_weights( self : Any , module : Union[str, Any] ) -> None:
        if isinstance(module , nn.Convad ):
            nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
        elif isinstance(module , (nn.BatchNormad, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
    def _set_gradient_checkpointing( self : Optional[int] , module : Optional[int] , value : Tuple=False ) -> None:
        if isinstance(module , RegNetEncoder ):
            module.gradient_checkpointing = value
UpperCAmelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.',
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''',
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
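# Added illustration (not part of the modeling file above): a standalone sketch of
# the `problem_type` inference used in the classification forward pass; the sample
# tensors below are made up.
import torch

def infer_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    if num_labels == 1:
        return "regression"
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"

assert infer_problem_type(1, torch.tensor([0.7])) == "regression"
assert infer_problem_type(3, torch.tensor([2])) == "single_label_classification"
assert infer_problem_type(3, torch.tensor([[0.0, 1.0, 1.0]])) == "multi_label_classification"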
| 256
|
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample) -> int:
        """Return the index (0 or 1) of the weight vector closest to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the smaller squared Euclidean distance wins
        return 0 if d0 < d1 else 1

    def update(self, weights, sample, j, alpha) -> list[list[int | float]]:
        """Move winning weight vector `j` toward the sample by learning rate `alpha`."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f'Clusters that the test sample belongs to : {winner}')
    print(f'Weights that have been trained : {weights}')


# running the main() function
if __name__ == "__main__":
    main()
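# Added sanity check (illustrative, not from the original file): with the initial
# weights above, sample [1, 1, 0, 0] has squared distances d0 = 1.86 to weight
# vector 0 and d1 = 0.98 to weight vector 1, so the closer vector 1 wins.
assert SelfOrganizingMap().get_winner(
    [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]], [1, 1, 0, 0]
) == 1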
| 168
| 0
|
"""simple docstring"""
def circle_sort(collection):
    """Sort `collection` in place with circle sort and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
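# Usage examples (added): circle sort mutates the list in place and also returns it.
assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert circle_sort([]) == []
assert circle_sort([7]) == [7]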
| 371
|
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video URL via downloadgram.net and return the raw bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, 'wb') as fp:
        fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.")
| 195
| 0
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {'score': ANY(float), 'label': ANY(str)},
                    {'score': ANY(float), 'label': ANY(str)},
                ],
            )
@require_torch
    def test_small_model_pt(self):
        small_model = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10}, crop_size={'height': 10, 'width': 10})
        video_classifier = pipeline(
            'video-classification', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
        video_file_path = hf_hub_download(repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
        )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 222
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , A_ , A_="<unk>" , A_="<s>" , A_="</s>" , A_="<pad>" , A_="[SEP]" , A_="[MASK]" , A_="[CLS]" , A_ = None , **A_ , ) -> None:
"""simple docstring"""
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sep_token=A_ , mask_token=A_ , cls_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return self.sp_model.get_piece_size()
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(A_ , out_type=A_ )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.piece_to_id(A_ )
def __UpperCamelCase ( self , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.sp_model.IdToPiece(A_ )
return token
    def convert_tokens_to_string(self, tokens):
        """Join sentencepiece tokens back into a string, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, spaces_between_special_tokens=True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r' (\[(MASK|SEP)\])', r'\1', ' '.join(sub_texts))
        else:
            text = ''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Add [CLS] and [SEP] around one sequence, or around both sequences of a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
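# Added illustration (token ids below are made up, not BigBird's real ids): the
# three sequence helpers above lay out  [CLS] A [SEP]  for a single sequence and
# [CLS] A [SEP] B [SEP]  for a pair, with token type id 1 covering `B [SEP]`.
CLS, SEP = 65, 66  # hypothetical ids
a_ids, b_ids = [10, 11], [20, 21]
assert [CLS] + a_ids + [SEP] + b_ids + [SEP] == [65, 10, 11, 66, 20, 21, 66]
assert [0] * (len(a_ids) + 2) + [1] * (len(b_ids) + 1) == [0, 0, 0, 0, 1, 1, 1]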
| 222
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Any=[10, 20, 30, 40] , UpperCamelCase__ : Any=[1, 1, 2, 1] , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str="relu" , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Tuple=None , ) -> List[str]:
"""simple docstring"""
snake_case : List[str] = parent
snake_case : Tuple = batch_size
snake_case : int = image_size
snake_case : Any = num_channels
snake_case : Optional[int] = embeddings_size
snake_case : Optional[int] = hidden_sizes
snake_case : str = depths
snake_case : Tuple = is_training
snake_case : List[str] = use_labels
snake_case : List[str] = hidden_act
snake_case : Tuple = num_labels
snake_case : Tuple = scope
snake_case : List[str] = len(UpperCamelCase__ )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : Any = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCAmelCase ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ) -> Tuple:
"""simple docstring"""
snake_case : List[str] = FlaxRegNetModel(config=UpperCamelCase__ )
snake_case : str = model(UpperCamelCase__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Dict ) -> Dict:
"""simple docstring"""
snake_case : int = self.num_labels
snake_case : List[str] = FlaxRegNetForImageClassification(config=UpperCamelCase__ )
snake_case : Any = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
snake_case : str = self.prepare_config_and_inputs()
snake_case ,snake_case : Tuple = config_and_inputs
snake_case : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
def lowerCAmelCase ( self : List[str] ) -> None:
"""simple docstring"""
snake_case : List[str] = FlaxRegNetModelTester(self )
snake_case : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
return
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
snake_case ,snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Union[str, Any] = model_class(UpperCamelCase__ )
snake_case : Union[str, Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : int = [*signature.parameters.keys()]
snake_case : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ):
snake_case : Union[str, Any] = model_class(UpperCamelCase__ )
snake_case : Any = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
snake_case ,snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Tuple = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : List[str] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case ,snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case : Any = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[Any] = model_class(UpperCamelCase__ )
@jax.jit
def model_jitted(UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ):
return model(pixel_values=UpperCamelCase__ , **UpperCamelCase__ )
with self.subTest('''JIT Enabled''' ):
snake_case : Optional[int] = model_jitted(**UpperCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
snake_case : Tuple = model_jitted(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_flax
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 83
|
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num!, computed recursively; lru_cache memoizes intermediate results."""
    if num < 0:
        raise ValueError('Number should not be negative.')
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
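# Usage example (added): lru_cache memoizes results, so factorial(6) reuses the
# cached factorial(5).
assert factorial(5) == 120
assert factorial(6) == 720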
| 83
| 1
|
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be turned into `b` by uppercasing some of its
    lowercase letters and deleting the remaining lowercase letters."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
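# Usage examples (added): "daBcd" -> capitalize 'a' and 'c', delete both 'd's,
# leaving "ABC"; "dBcd" has no letter that can become the leading 'A'.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False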
| 260
|
"""simple docstring"""
def pancake_sort(arr):
    """Sort a list by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole prefix of length cur, moving the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
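# Usage example (added): each pass flips the current maximum to the front and then
# into its final position.
assert pancake_sort([3, 6, 1, 10, 2]) == [1, 2, 3, 6, 10]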
| 260
| 1
|
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below `max_prime` that are differences of consecutive cubes,
    i.e. numbers of the form (n + 1) ** 3 - n ** 3 = 3 * n**2 + 3 * n + 1."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
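# Quick check (added): the first consecutive-cube differences are 7, 19, 37, 61, 91,
# and 91 = 7 * 13 is the first composite among them.
assert [(n + 1) ** 3 - n**3 for n in range(1, 6)] == [7, 19, 37, 61, 91]
assert is_prime(7) and is_prime(19) and not is_prime(91)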
| 263
|
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase , 'num_attention_heads' ) )
class LevitModelTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=16 , __UpperCAmelCase=[128, 256, 384] , __UpperCAmelCase=[4, 6, 8] , __UpperCAmelCase=[2, 3, 4] , __UpperCAmelCase=[16, 16, 16] , __UpperCAmelCase=0 , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = num_channels
__UpperCamelCase = kernel_size
__UpperCamelCase = stride
__UpperCamelCase = padding
__UpperCamelCase = hidden_sizes
__UpperCamelCase = num_attention_heads
__UpperCamelCase = depths
__UpperCamelCase = key_dim
__UpperCamelCase = drop_path_rate
__UpperCamelCase = patch_size
__UpperCamelCase = attention_ratio
__UpperCamelCase = mlp_ratio
__UpperCamelCase = initializer_range
__UpperCamelCase = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = num_labels
__UpperCamelCase = initializer_range
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = LevitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = (self.image_size, self.image_size)
__UpperCamelCase , __UpperCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
__UpperCamelCase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__UpperCamelCase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = LevitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = LevitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCamelCase = outputs.hidden_states
__UpperCamelCase = len(self.model_tester.depths ) + 1
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
__UpperCamelCase = (self.model_tester.image_size, self.model_tester.image_size)
__UpperCamelCase , __UpperCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
__UpperCamelCase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__UpperCamelCase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
__UpperCamelCase = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
__UpperCamelCase = problem_type['title']
__UpperCamelCase = problem_type['num_labels']
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if problem_type["num_labels"] > 1:
__UpperCamelCase = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
__UpperCamelCase = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCAmelCase ) as warning_list:
__UpperCamelCase = model(**__UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = LevitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 263
| 1
|
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Discount each cash flow by (1 + discount_rate) ** i and sum, rounded to 2 decimals."""
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
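# Usage example (added): an outlay of 100 followed by two inflows of 60 at a 10%
# discount rate nets 60 / 1.1 + 60 / 1.21 - 100 = 4.13.
assert net_present_value(0.10, [-100, 60, 60]) == 4.13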
| 204
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
'''simple docstring'''
def __init__( self : str , A_ : Optional[Any] , ) -> str:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = 2
lowerCamelCase_ = 99
lowerCamelCase_ = 0
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.02
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = 'last'
lowerCamelCase_ = True
lowerCamelCase_ = None
lowerCamelCase_ = 0
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
lowerCamelCase_ = None
if self.use_input_lengths:
lowerCamelCase_ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a__ ( self : int , A_ : List[str] , A_ : List[Any] , A_ : str , A_ : List[Any] , A_ : int , A_ : Tuple , A_ : Optional[int] , A_ : Optional[int] , A_ : str , ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFFlaubertModel(config=A_ )
lowerCamelCase_ = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
lowerCamelCase_ = model(A_ )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : Tuple , A_ : List[str] , A_ : int , A_ : List[Any] , A_ : Any , A_ : Any , A_ : Dict , A_ : str , A_ : List[Any] , A_ : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = TFFlaubertWithLMHeadModel(A_ )
lowerCamelCase_ = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : str , A_ : Tuple , A_ : Any , A_ : Any , A_ : List[Any] , A_ : Dict , A_ : List[Any] , A_ : Union[str, Any] , A_ : Optional[int] , A_ : List[Any] , ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFFlaubertForQuestionAnsweringSimple(A_ )
lowerCamelCase_ = {'input_ids': input_ids, 'lengths': input_lengths}
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[int] , A_ : List[Any] , A_ : str , A_ : List[str] , A_ : Dict , A_ : Optional[Any] , A_ : Tuple , A_ : str , A_ : Optional[int] , A_ : Tuple , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFFlaubertForSequenceClassification(A_ )
lowerCamelCase_ = {'input_ids': input_ids, 'lengths': input_lengths}
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : Dict , A_ : Optional[Any] , A_ : List[Any] , A_ : int , A_ : Any , A_ : Union[str, Any] , A_ : str , A_ : Any , A_ : Union[str, Any] , A_ : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFFlaubertForTokenClassification(config=A_ )
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : List[Any] , A_ : Optional[int] , A_ : List[Any] , A_ : Optional[int] , A_ : Tuple , A_ : Union[str, Any] , A_ : int , A_ : str , A_ : Tuple , A_ : str , ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.num_choices
lowerCamelCase_ = TFFlaubertForMultipleChoice(config=A_ )
lowerCamelCase_ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase_ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase_ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase_ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : Union[str, Any] , A_ : Any , A_ : List[Any] , A_ : Union[str, Any] , A_ : str , A_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = TFFlaubertModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , emb_dim=37 )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class A( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 204
| 1
|
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
lowerCAmelCase__ = '''scheduler_config.json'''
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    '''simple docstring'''
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta: float = 0.999, dtype=jnp.float32) -> jnp.ndarray:
    '''simple docstring'''

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    """simple docstring"""

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """simple docstring"""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""")

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    '''simple docstring'''
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    '''simple docstring'''
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    '''simple docstring'''
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
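

# Minimal usage sketch (illustrative, not part of the original module): build a
# 10-step cosine beta schedule and broadcast per-timestep scalars over a batch
# of 2x4x4 samples. All shapes and step counts here are demo assumptions.
if __name__ == "__main__":
    demo_betas = betas_for_alpha_bar(10)
    demo_alphas_cumprod = jnp.cumprod(1.0 - demo_betas, axis=0)
    demo_timesteps = jnp.array([0, 9])
    sqrt_alpha = demo_alphas_cumprod[demo_timesteps] ** 0.5  # shape (2,)
    # reshape (2,) -> (2, 1, 1) so it can scale (2, 4, 4) samples
    scaled = broadcast_to_shape_from_left(sqrt_alpha, (2, 4, 4)) * jnp.ones((2, 4, 4))
    print(demo_betas.shape, scaled.shape)  # (10,) (2, 4, 4)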
"""simple docstring"""
from typing import Any
class Node:
    """simple docstring"""

    def __init__(self, data: Any):
        """simple docstring"""
        self.data = data
        self.next = None


class LinkedList:
    """simple docstring"""

    def __init__(self):
        """simple docstring"""
        self.head = None

    def print_list(self):
        """simple docstring"""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        """simple docstring"""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_a, node_data_b):
        """simple docstring"""
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        node_a.data, node_b.data = node_b.data, node_a.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
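

# Alternative sketch (not in the original): swap the nodes themselves by
# relinking pointers instead of exchanging .data, which preserves node identity.
# Hypothetical helper built on the Node/LinkedList classes above.
def swap_nodes_by_relinking(ll: LinkedList, data_a: Any, data_b: Any) -> None:
    if data_a == data_b:
        return
    prev_a, node_a = None, ll.head
    while node_a is not None and node_a.data != data_a:
        prev_a, node_a = node_a, node_a.next
    prev_b, node_b = None, ll.head
    while node_b is not None and node_b.data != data_b:
        prev_b, node_b = node_b, node_b.next
    if node_a is None or node_b is None:
        return
    # point each predecessor (or the head) at the other node
    if prev_a is None:
        ll.head = node_b
    else:
        prev_a.next = node_b
    if prev_b is None:
        ll.head = node_a
    else:
        prev_b.next = node_a
    # swap the outgoing pointers; simultaneous assignment also handles adjacent nodes
    node_a.next, node_b.next = node_b.next, node_a.next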
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f'''Vertex\tShortest Distance from vertex {src}''')
    for i, d in enumerate(distance):
        print(f'''{i}\t\t{d}''')


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("""src""", """dst""", """weight"""))
        if distance[u] != float("""inf""") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("""inf""")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("""src""", """dst""", """weight"""))

            if distance[u] != float("""inf""") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("""Negative cycle found""")

    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
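

def _demo_bellman_ford() -> None:
    # Non-interactive usage sketch; the graph below is made up for illustration.
    # Expected shortest distances from vertex 0: [0.0, 2.0, 1.0].
    demo_graph = [
        {"src": 0, "dst": 1, "weight": 2},
        {"src": 1, "dst": 2, "weight": -1},
        {"src": 0, "dst": 2, "weight": 4},
    ]
    distance = bellman_ford(demo_graph, vertex_count=3, edge_count=3, src=0)
    print_distance(distance, 0)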
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ), )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
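
# Example invocation (illustrative; the training script and its arguments are
# placeholders): spawn 8 TPU processes, each running train.py's _mp_fn.
#
#   python xla_spawn.py --num_cores 8 train.py --per_device_train_batch_size 8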
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein):
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein['''aatype'''].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein['''aatype'''].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein['''aatype'''].device, )
    protein_aatype = protein['''aatype'''].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein['''aatype'''].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch):
    batch = tree_map(lambda n: torch.tensor(n, device=batch['''aatype'''].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """simple docstring"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''')

        requires_backends(self, '''vision''')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('''text_queries''')

        if isinstance(image, (str, Image.Image)):
            inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs['''image'''])
        candidate_labels = inputs['''candidate_labels''']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(''',''')

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop('''target_size''')
        candidate_label = model_inputs.pop('''candidate_label''')
        is_last = model_inputs.pop('''is_last''')

        outputs = self.model(**model_inputs)

        model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output['''candidate_label''']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['''target_size'''])[0]

            for index in outputs["scores"].nonzero():
                score = outputs['''scores'''][index].item()
                box = self._get_bounding_box(outputs['''boxes'''][index][0])

                result = {'''score''': score, '''label''': label, '''box''': box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results
    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
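

# Usage sketch (the checkpoint and image URL are illustrative): the pipeline
# returns a list of {"score", "label", "box"} dicts per matched query.
#
# from transformers import pipeline
# detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote control"],
# )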
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowercase__ ( unittest.TestCase ):
    def test_get_aligned_output_features_output_indices(self):
        '''simple docstring'''
        stage_names = ['a', 'b', 'c']

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ['c'])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(['a', 'c'], None, stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        '''simple docstring'''
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(('a', 'b'), (0, 1), ['a', 'b'])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 1), ['a'])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ['a', 'b'])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ['a'])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0,), ['a', 'b', 'c'])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 2), ['a', 'b', 'c'])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['b', 'a'], (0, 1), ['a', 'b'])

        # Check passes with valid inputs
        verify_out_features_out_indices(['a', 'b', 'd'], (0, 1, -1), ['a', 'b', 'c', 'd'])
    def test_backbone_mixin(self):
        '''simple docstring'''
        backbone = BackboneMixin()

        backbone.stage_names = ['a', 'b', 'c']
        backbone._out_features = ['a', 'c']
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ['a', 'b']
        self.assertEqual(backbone.out_features, ['a', 'b'])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [-3, -1])
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    """simple docstring"""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """simple docstring"""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    """simple docstring"""
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match")
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions)
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)", )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.", )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f"Saving checkpoint to \"{output_checkpoint_file}\"")
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
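
# Example invocations (paths are placeholders):
#
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron/checkpoint.zip
#
#   python convert_megatron_gpt2_checkpoint.py /path/to/model_optim_rng.pt --config_file my_gpt2_config.json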
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name, feature_script=item, tested_section="main()" if parser_only else "training_function()", ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only)
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"""TESTING_MOCKED_DATALOADERS""": """1"""})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps epoch\n            --output_dir {self.tmpdir}\n        ".split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps 1\n            --output_dir {self.tmpdir}\n        ".split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}\n        ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}\n        ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = "\n            examples/by_feature/cross_validation.py\n            --num_folds 2\n        ".split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"\n                examples/by_feature/tracking.py\n                --with_tracking\n                --project_dir {tmpdir}\n            ".split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ['''pixel_values''']

    def __init__(
        self, clusters=None, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_normalize=True, do_color_quantize=True, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''height''': 256, '''width''': 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}')
        return resize(
            image, size=(size['''height'''], size['''width''']), resample=resample, data_format=data_format, **kwargs)

    def normalize(self, image, data_format=None, ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self, images, do_resize=None, size=None, resample=None, do_normalize=None, do_color_quantize=None, clusters=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''')

        if do_color_quantize and clusters is None:
            raise ValueError('''Clusters must be specified if do_color_quantize is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'''input_ids''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
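

# Minimal sketch (shapes are illustrative): quantize a random 8x8 RGB image
# against 4 random cluster centers; each pixel maps to its nearest cluster id.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_image = rng.random((8, 8, 3))
    demo_clusters = rng.random((4, 3))
    token_ids = color_quantize(demo_image, demo_clusters)
    print(token_ids.shape)  # (64,) -- one cluster index per pixel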
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''mgp-str'''

    def __init__(
        self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs, ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
ROMAN = [
(10_00, '''M'''),
(9_00, '''CM'''),
(5_00, '''D'''),
(4_00, '''CD'''),
(1_00, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int(roman: str) -> int:
    """simple docstring"""
    vals = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """simple docstring"""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
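
# Quick round-trip sketch for the two converters above.
if __name__ == "__main__":
    assert roman_to_int("MMXXIV") == 2024
    assert int_to_roman(2024) == "MMXXIV"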
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''')
        tokenizer = AutoTokenizer.from_pretrained('''xlm-roberta-base''')
        text = '''The dog is cute and lives in the garden house'''
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])

        output = model(input_ids)['''last_hidden_state''']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        """simple docstring"""
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """simple docstring"""
        if os.path.isfile(save_directory):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer")
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer")
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        """simple docstring"""
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        """simple docstring"""
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs, ) -> BatchEncoding:
        """simple docstring"""
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details", FutureWarning, )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
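

# Usage sketch (the checkpoint name is illustrative): RagTokenizer bundles a
# question-encoder tokenizer and a generator tokenizer under one object.
#
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")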
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=50_400, n_positions=2_048, n_ctx=2_048, n_embd=4_096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, tie_word_embeddings=False, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        """simple docstring"""
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        """simple docstring"""
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self._config.n_head

    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 13
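

# Sketch (illustrative): inspect the ONNX input spec with past-key-values enabled.
#
# config = CodeGenConfig()
# onnx_config = CodeGenOnnxConfig(config, use_past=True)
# print(onnx_config.inputs)  # input_ids, past_key_values.*, attention_mask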
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite runs a backward pass at inference time, and there is no
    # deterministic backward operator for some of the ops involved
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
def selection_sort(collection: list) -> list:
    """Sort `collection` in place with the selection sort algorithm."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
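# Editor's note (added sanity check): selection sort must agree with the
# built-in `sorted()` on any input.
assert selection_sort([64, 25, 12, 22, 11]) == [11, 12, 22, 25, 64]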
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Wraps two `Transformer2DModel`s and mixes their outputs at inference time."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
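# Editor's note (added sketch, not from the original file): how a pipeline is
# expected to configure the dual transformer above. The values are illustrative.
#
# model = DualTransformer2DModel(in_channels=4, cross_attention_dim=768)
# model.mix_ratio = 0.7                  # blend the two branch outputs 70/30
# model.condition_lengths = [77, 257]    # e.g. text tokens followed by image tokens
# model.transformer_index_for_condition = [1, 0]
# # `encoder_hidden_states` must then have shape (batch, 77 + 257, num_features);
# # the first 77 tokens go through transformers[1], the remaining 257 through
# # transformers[0], and the residuals are mixed according to `mix_ratio`.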
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = """<HTML>
    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>
    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>
    <h1>My First Heading</h1>
    <p>My first paragraph.</p>
    </body>
    </html>
    """

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
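# Editor's note (added sketch): the `_LazyModule` pattern above defers the real
# import until first attribute access, so the package import stays cheap. The
# line below is illustrative and assumes `sentencepiece` is installed:
#
# from transformers.models.bartpho import BartphoTokenizer  # triggers the actual
# # import of `tokenization_bartpho` only at this point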
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class snake_case_( a__ ):
__UpperCamelCase = '''Wav2Vec2FeatureExtractor'''
__UpperCamelCase = '''AutoTokenizer'''
def __init__( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int ):
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = self.feature_extractor
lowerCAmelCase : List[str] = False
@classmethod
def lowerCamelCase__ ( cls : Tuple , UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
try:
return super().from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , UpperCamelCase_ , )
lowerCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = WavaVecaCTCTokenizer.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
return cls(feature_extractor=UpperCamelCase_ , tokenizer=UpperCamelCase_ )
def __call__( self : Any , *UpperCamelCase_ : int , **UpperCamelCase_ : Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase_ , **UpperCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
lowerCAmelCase : Tuple = kwargs.pop('''raw_speech''' )
else:
lowerCAmelCase : Tuple = kwargs.pop('''audio''' , UpperCamelCase_ )
lowerCAmelCase : List[str] = kwargs.pop('''sampling_rate''' , UpperCamelCase_ )
lowerCAmelCase : int = kwargs.pop('''text''' , UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase : int = args[0]
lowerCAmelCase : int = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
lowerCAmelCase : Optional[int] = self.feature_extractor(UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_ )
if text is not None:
lowerCAmelCase : Optional[int] = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCAmelCase : List[Any] = encodings['''input_ids''']
return inputs
def lowerCamelCase__ ( self : str , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Tuple = kwargs.pop('''input_features''' , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = kwargs.pop('''labels''' , UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase : Union[str, Any] = args[0]
lowerCAmelCase : Optional[int] = args[1:]
if input_features is not None:
lowerCAmelCase : Tuple = self.feature_extractor.pad(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
if labels is not None:
lowerCAmelCase : Optional[Any] = self.tokenizer.pad(UpperCamelCase_ , **UpperCamelCase_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowerCAmelCase : int = labels['''input_ids''']
return input_features
def lowerCamelCase__ ( self : Union[str, Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Union[str, Any] ):
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@contextmanager
def lowerCamelCase__ ( self : Optional[int] ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : List[str] = self.tokenizer
yield
lowerCAmelCase : Optional[Any] = self.feature_extractor
lowerCAmelCase : Union[str, Any] = False
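# Editor's note (added sketch): minimal use of the processor above. The
# checkpoint name is a public Wav2Vec2 model; the audio array is synthetic.
#
# import numpy as np
#
# processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
# speech = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
# inputs = processor(audio=speech, sampling_rate=16000, return_tensors="pt")
# batch = processor(audio=speech, sampling_rate=16000, text="HELLO", return_tensors="pt")
# # `batch["labels"]` carries the tokenized transcription, wired up at the end
# # of `__call__` above.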
"""simple docstring"""
snake_case__ : str = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
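# Editor's note (added sanity check): every schedule above is consumed as a
# descending sequence of denoising timesteps ending at 0; verify that invariant.
for _schedule in (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
):
    assert _schedule[-1] == 0
    assert all(a > b for a, b in zip(_schedule, _schedule[1:]))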
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Optional[int] = PLBartTokenizer(lowerCAmelCase__ , language_codes="base" , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
SCREAMING_SNAKE_CASE_: int = tokenizer.vocab_size
SCREAMING_SNAKE_CASE_: Tuple = [tokenizer.convert_ids_to_tokens(lowerCAmelCase__) for x in range(end - 4 , lowerCAmelCase__)]
self.assertListEqual(lowerCAmelCase__ , ["__java__", "__python__", "__en_XX__", "<mask>"])
SCREAMING_SNAKE_CASE_: Optional[int] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer(lowerCAmelCase__).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__) , lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = PLBartTokenizer(lowerCAmelCase__ , language_codes="multi" , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE_: int = [tokenizer.convert_ids_to_tokens(lowerCAmelCase__) for x in range(end - 7 , lowerCAmelCase__)]
self.assertListEqual(
lowerCAmelCase__ , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"])
SCREAMING_SNAKE_CASE_: str = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
SCREAMING_SNAKE_CASE_: Tuple = tokenizer(lowerCAmelCase__).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__) , lowerCAmelCase__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
def _SCREAMING_SNAKE_CASE ( self : List[str]):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0003)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Dict = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids)
SCREAMING_SNAKE_CASE_: Optional[Any] = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
SCREAMING_SNAKE_CASE_: int = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = 10
SCREAMING_SNAKE_CASE_: Optional[Any] = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , lowerCAmelCase__)
self.assertEqual(len(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]) , [5_0004, 5_0001])
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = PLBartTokenizer.from_pretrained(lowerCAmelCase__)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: Any = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase__)
self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
SCREAMING_SNAKE_CASE_: Union[str, Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
self.assertEqual((2, 26) , batch.input_ids.shape)
self.assertEqual((2, 26) , batch.attention_mask.shape)
SCREAMING_SNAKE_CASE_: Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[Any] = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors="pt")
SCREAMING_SNAKE_CASE_: Any = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors="pt")
SCREAMING_SNAKE_CASE_: List[Any] = targets["input_ids"]
SCREAMING_SNAKE_CASE_: List[Any] = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java")
self.assertEqual(
nested_simplify(lowerCAmelCase__) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 5_0003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0001,
} , )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from maths.prime_factors import prime_factors
def __magic_name__ ( lowercase ):
if not isinstance(lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =f'''Input value of [number={number}] must be an integer'''
raise TypeError(lowercase )
if number < 1:
raise ValueError("""Input must be a positive integer""" )
return -1 if len(prime_factors(lowercase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
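# Editor's note (added worked examples): 10 = 2 * 5 has an even count of prime
# factors, 12 = 2 * 2 * 3 an odd count (factors counted with multiplicity).
assert liouville_lambda(10) == 1
assert liouville_lambda(12) == -1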
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""
Partition problem: split `arr` into two subsets so that the difference of the
subset sums is minimized, and return that minimum difference.
"""


def find_min(arr):
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i elements sums to j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
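# Editor's note (added worked example): [1, 6, 11, 5] splits into {1, 5, 6} and
# {11} with sums 12 and 11, so the minimum difference is 1.
assert find_min([1, 6, 11, 5]) == 1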
'''simple docstring'''
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
"""Tests for the BlenderbotSmall tokenizer."""
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "longformer"
def __init__( self , __A = 512 , __A = 2 , __A = 1 , __A = 0 , __A = 2 , __A = 3_0522 , __A = 768 , __A = 12 , __A = 12 , __A = 3072 , __A = "gelu" , __A = 0.1 , __A = 0.1 , __A = 512 , __A = 2 , __A = 0.02 , __A = 1E-1_2 , __A = False , **__A , ) -> Dict:
super().__init__(pad_token_id=__A , **__A )
a =attention_window
a =sep_token_id
a =bos_token_id
a =eos_token_id
a =vocab_size
a =hidden_size
a =num_hidden_layers
a =num_attention_heads
a =hidden_act
a =intermediate_size
a =hidden_dropout_prob
a =attention_probs_dropout_prob
a =max_position_embeddings
a =type_vocab_size
a =initializer_range
a =layer_norm_eps
a =onnx_export
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , __A , __A = "default" , __A = None ) -> Dict:
super().__init__(__A , __A , __A )
a =True
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
a ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
a =super().outputs
if self.task == "default":
a ={0: '''batch'''}
return outputs
@property
def SCREAMING_SNAKE_CASE ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def SCREAMING_SNAKE_CASE ( self , __A , __A = -1 , __A = -1 , __A = False , __A = None , ) -> Mapping[str, Any]:
a =super().generate_dummy_inputs(
preprocessor=__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
a =torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
a =1
return inputs
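# Editor's note (added sketch): exercising the ONNX config above. The checkpoint
# is a placeholder for any Longformer model; PyTorch must be installed.
#
# from transformers import AutoConfig, AutoTokenizer
#
# config = AutoConfig.from_pretrained("allenai/longformer-base-4096")
# onnx_config = LongformerOnnxConfig(config)
# tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
# dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=1, seq_length=8)
# # dummy["global_attention_mask"] now flags every second token as global,
# # matching the pattern set at the end of `generate_dummy_inputs`.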
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __A ( _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = RoFormerTokenizer
__lowerCAmelCase = RoFormerTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
super().setUp()
def SCREAMING_SNAKE_CASE ( self , **__A ) -> Optional[int]:
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__A )
def SCREAMING_SNAKE_CASE ( self , **__A ) -> List[Any]:
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__A )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a ='''永和服装饰品有限公司,今天天气非常好'''
a ='''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =self.get_tokenizer()
a , a =self.get_chinese_input_output_texts()
a =tokenizer.tokenize(__A )
self.assertListEqual(__A , output_text.split() )
a =tokens + [tokenizer.unk_token]
a =[2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.get_rust_tokenizer()
a , a =self.get_chinese_input_output_texts()
a =tokenizer.tokenize(__A )
self.assertListEqual(__A , output_text.split() )
a =tokens + [tokenizer.unk_token]
a =[2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
def SCREAMING_SNAKE_CASE ( self ) -> int:
pass
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
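# Editor's note (added usage sketch; the script filename is an assumption):
#
#   python offline_runner_check.py --target_runners runner-a,runner-b --token "$GITHUB_TOKEN"
#
# Offline runners are mirrored into offline_runners.txt for the Slack report,
# and the script fails loudly with a ValueError listing them.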
"""
Counting problem in the style of Project Euler 114: in how many ways can a row
of the given length be filled with red blocks at least three units long, any
two blocks separated by at least one black square?
"""


def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
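# Editor's note (added check): the Project Euler 114 statement gives exactly
# seventeen ways for a row measuring seven units, which pins down the recurrence.
assert solution(7) == 17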
import warnings
from contextlib import contextmanager

from ....processing_utils import ProcessorMixin


class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
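# Editor's note (added sketch): with the `onnx` backend missing, any use of the
# dummy class fails early with an actionable message instead of an AttributeError.
#
# try:
#     OnnxRuntimeModel()  # onnxruntime not installed
# except ImportError as err:
#     print(err)  # explains which backend to install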
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
return abs(UpperCamelCase ) if a == 0 else greatest_common_divisor(b % a , UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
while y: # --> when y=0 then loop will terminate and return x as final GCD.
lowerCamelCase__ , lowerCamelCase__ : Tuple = y, x % y
return abs(UpperCamelCase )
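# e.g. gcd_by_iterative(24, 40) steps through (40, 24), (24, 16), (16, 8), (8, 0) and returns 8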
def SCREAMING_SNAKE_CASE_ () -> Tuple:
try:
lowerCamelCase__ : Dict = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
lowerCamelCase__ : Any = int(nums[0] )
lowerCamelCase__ : Optional[Any] = int(nums[1] )
print(
f'''greatest_common_divisor({num_a}, {num_a}) = '''
f'''{greatest_common_divisor(UpperCamelCase , UpperCamelCase )}''' )
print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(UpperCamelCase , UpperCamelCase )}''' )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
| 41
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = BlenderbotSmallTokenizer
__magic_name__ = False
def a_ ( self ):
super().setUp()
snake_case = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
snake_case = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
snake_case = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
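# merges use the standard BPE file format: a "#version" header followed by one merge rule per line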
snake_case = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def a_ ( self , **__snake_case ):
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def a_ ( self , __snake_case ):
snake_case = '''adapt act apte'''
snake_case = '''adapt act apte'''
return input_text, output_text
def a_ ( self ):
snake_case = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case = '''adapt act apte'''
snake_case = ['''adapt''', '''act''', '''ap@@''', '''te''']
snake_case = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
snake_case = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def a_ ( self ):
snake_case = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1_3_8_4]
snake_case = '''I am a small frog.'''
snake_case = tok([src_text] , padding=__snake_case , truncation=__snake_case )['''input_ids''']
snake_case = tok.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def a_ ( self ):
snake_case = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
snake_case = '''I am a small frog .'''
snake_case = '''.'''
snake_case = tok(__snake_case )['''input_ids''']
snake_case = tok(__snake_case )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 127
| 0
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : torch.FloatTensor
class __lowerCAmelCase ( nn.Module):
def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=3 , lowerCAmelCase__=("DownEncoderBlock2D",) , lowerCAmelCase__=(6_4,) , lowerCAmelCase__=2 , lowerCAmelCase__=3_2 , lowerCAmelCase__="silu" , lowerCAmelCase__=True , ) -> Any:
'''simple docstring'''
super().__init__()
a__ : Optional[int] =layers_per_block
a__ : Tuple =torch.nn.Convad(
lowerCAmelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
a__ : List[Any] =None
a__ : List[Any] =nn.ModuleList([] )
# down
a__ : Any =block_out_channels[0]
for i, down_block_type in enumerate(lowerCAmelCase__ ):
a__ : Tuple =output_channel
a__ : List[Any] =block_out_channels[i]
a__ : Optional[Any] =i == len(lowerCAmelCase__ ) - 1
a__ : List[str] =get_down_block(
lowerCAmelCase__ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase__ , resnet_groups=lowerCAmelCase__ , attention_head_dim=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
self.down_blocks.append(lowerCAmelCase__ )
# mid
a__ : Dict =UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
# out
a__ : str =nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase__ , eps=1E-6 )
a__ : Tuple =nn.SiLU()
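# with double_z the encoder emits twice the latent channels, i.e. mean and logvar of the posterior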
a__ : Union[str, Any] =2 * out_channels if double_z else out_channels
a__ : Optional[Any] =nn.Convad(block_out_channels[-1] , lowerCAmelCase__ , 3 , padding=1 )
a__ : Tuple =False
def _lowercase ( self , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] =x
a__ : List[str] =self.conv_in(lowerCAmelCase__ )
if self.training and self.gradient_checkpointing:
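# gradient checkpointing: recompute block activations during the backward pass to trade compute for memory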
def create_custom_forward(lowerCAmelCase__ ):
def custom_forward(*lowerCAmelCase__ ):
return module(*lowerCAmelCase__ )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
a__ : Optional[int] =torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
# middle
a__ : List[Any] =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
else:
for down_block in self.down_blocks:
a__ : str =torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ )
# middle
a__ : Dict =torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase__ )
else:
# down
for down_block in self.down_blocks:
a__ : int =down_block(lowerCAmelCase__ )
# middle
a__ : Optional[Any] =self.mid_block(lowerCAmelCase__ )
# post-process
a__ : int =self.conv_norm_out(lowerCAmelCase__ )
a__ : Optional[int] =self.conv_act(lowerCAmelCase__ )
a__ : Optional[Any] =self.conv_out(lowerCAmelCase__ )
return sample
class __lowerCAmelCase ( nn.Module):
def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=3 , lowerCAmelCase__=("UpDecoderBlock2D",) , lowerCAmelCase__=(6_4,) , lowerCAmelCase__=2 , lowerCAmelCase__=3_2 , lowerCAmelCase__="silu" , lowerCAmelCase__="group" , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
a__ : Optional[int] =layers_per_block
a__ : Union[str, Any] =nn.Convad(
lowerCAmelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
a__ : Optional[int] =None
a__ : int =nn.ModuleList([] )
a__ : int =in_channels if norm_type == "spatial" else None
# mid
a__ : int =UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
# up
a__ : Any =list(reversed(lowerCAmelCase__ ) )
a__ : str =reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase__ ):
a__ : Optional[Any] =output_channel
a__ : str =reversed_block_out_channels[i]
a__ : Dict =i == len(lowerCAmelCase__ ) - 1
a__ : Any =get_up_block(
lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , resnet_groups=lowerCAmelCase__ , attention_head_dim=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , resnet_time_scale_shift=lowerCAmelCase__ , )
self.up_blocks.append(lowerCAmelCase__ )
a__ : Union[str, Any] =output_channel
# out
if norm_type == "spatial":
a__ : Any =SpatialNorm(block_out_channels[0] , lowerCAmelCase__ )
else:
a__ : str =nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase__ , eps=1E-6 )
a__ : str =nn.SiLU()
a__ : Union[str, Any] =nn.Convad(block_out_channels[0] , lowerCAmelCase__ , 3 , padding=1 )
a__ : List[Any] =False
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]:
'''simple docstring'''
a__ : List[Any] =z
a__ : Union[str, Any] =self.conv_in(lowerCAmelCase__ )
a__ : Any =next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase__ ):
def custom_forward(*lowerCAmelCase__ ):
return module(*lowerCAmelCase__ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
a__ : Optional[int] =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
a__ : str =sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
a__ : Union[str, Any] =torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
else:
# middle
a__ : Optional[int] =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , lowerCAmelCase__ )
a__ : List[str] =sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
a__ : Optional[int] =torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ )
else:
# middle
a__ : Optional[int] =self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Dict =sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
a__ : Optional[int] =up_block(lowerCAmelCase__ , lowerCAmelCase__ )
# post-process
if latent_embeds is None:
a__ : List[str] =self.conv_norm_out(lowerCAmelCase__ )
else:
a__ : int =self.conv_norm_out(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Optional[Any] =self.conv_act(lowerCAmelCase__ )
a__ : List[str] =self.conv_out(lowerCAmelCase__ )
return sample
class __lowerCAmelCase ( nn.Module):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__="random" , lowerCAmelCase__=False , lowerCAmelCase__=True ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
a__ : Optional[int] =n_e
a__ : Tuple =vq_embed_dim
a__ : int =beta
a__ : str =legacy
a__ : str =nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
a__ : Optional[int] =remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
a__ : List[str] =self.used.shape[0]
a__ : List[Any] =unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
a__ : str =self.re_embed
a__ : Dict =self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
a__ : Any =n_e
a__ : Optional[int] =sane_index_shape
def _lowercase ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
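# remap raw codebook indices onto the reduced index set in self.used; unmatched indices fall back to self.unknown_index ("random", "extra", or a fixed integer)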
a__ : Any =inds.shape
assert len(lowerCAmelCase__ ) > 1
a__ : Optional[Any] =inds.reshape(ishape[0] , -1 )
a__ : Tuple =self.used.to(lowerCAmelCase__ )
a__ : Union[str, Any] =(inds[:, :, None] == used[None, None, ...]).long()
a__ : Tuple =match.argmax(-1 )
a__ : Optional[Any] =match.sum(2 ) < 1
if self.unknown_index == "random":
a__ : Tuple =torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
a__ : str =self.unknown_index
return new.reshape(lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
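# inverse of the remapping above: gather the original codebook indices back out of self.used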
a__ : Any =inds.shape
assert len(lowerCAmelCase__ ) > 1
a__ : Tuple =inds.reshape(ishape[0] , -1 )
a__ : Tuple =self.used.to(lowerCAmelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
a__ : List[Any] =0 # simply set to zero
a__ : int =torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase__ )
return back.reshape(lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
a__ : Union[str, Any] =z.permute(0 , 2 , 3 , 1 ).contiguous()
a__ : Tuple =z.view(-1 , self.vq_embed_dim )
# nearest codebook entry for each latent vector: argmin over Euclidean distances ||z - e_j|| (via torch.cdist)
a__ : Optional[Any] =torch.argmin(torch.cdist(lowerCAmelCase__ , self.embedding.weight ) , dim=1 )
a__ : List[Any] =self.embedding(lowerCAmelCase__ ).view(z.shape )
a__ : List[str] =None
a__ : str =None
# compute loss for embedding
if not self.legacy:
a__ : Union[str, Any] =self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
a__ : Optional[Any] =torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
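# (straight-through estimator: the forward pass uses z_q while gradients flow back to z unchanged)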
a__ : List[Any] =z + (z_q - z).detach()
# reshape back to match original input shape
a__ : List[str] =z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
a__ : Dict =min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
a__ : Tuple =self.remap_to_used(lowerCAmelCase__ )
a__ : str =min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
a__ : Any =min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
if self.remap is not None:
a__ : Union[str, Any] =indices.reshape(shape[0] , -1 ) # add batch axis
a__ : str =self.unmap_to_all(lowerCAmelCase__ )
a__ : List[Any] =indices.reshape(-1 ) # flatten again
# get quantized latent vectors
a__ : Optional[int] =self.embedding(lowerCAmelCase__ )
if shape is not None:
a__ : Any =z_q.view(lowerCAmelCase__ )
# reshape back to match original input shape
a__ : Union[str, Any] =z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __lowerCAmelCase ( UpperCamelCase__):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Union[str, Any]:
'''simple docstring'''
a__ : List[Any] =parameters
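# the parameter tensor stacks mean and logvar along the channel dimension; split them apart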
a__ , a__ : str =torch.chunk(lowerCAmelCase__ , 2 , dim=1 )
a__ : Any =torch.clamp(self.logvar , -30.0 , 20.0 )
a__ : Any =deterministic
a__ : List[Any] =torch.exp(0.5 * self.logvar )
a__ : int =torch.exp(self.logvar )
if self.deterministic:
a__ : Optional[Any] =torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _lowercase ( self , lowerCAmelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
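# reparameterization trick: x = mean + std * eps with eps ~ N(0, I), so the sample stays differentiable w.r.t. the distribution parameters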
a__ : Any =randn_tensor(
self.mean.shape , generator=lowerCAmelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
a__ : Optional[Any] =self.mean + self.std * sample
return x
def _lowercase ( self , lowerCAmelCase__=None ) -> Tuple:
'''simple docstring'''
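# closed-form KL divergence between diagonal Gaussians; against the standard normal this is
# 0.5 * sum(mu^2 + sigma^2 - 1 - log(sigma^2))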
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=[1, 2, 3] ) -> Dict:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
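# Gaussian negative log-likelihood: 0.5 * sum(log(2*pi) + log(sigma^2) + (x - mu)^2 / sigma^2)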
a__ : Tuple =np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return self.mean
| 148
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCAmelCase ( UpperCamelCase__):
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[int] =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , "tf_padding" ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , "depth_multiplier" ) )
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=3 , lowerCAmelCase__=3_2 , lowerCAmelCase__=0.25 , lowerCAmelCase__=8 , lowerCAmelCase__=True , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=3_2 , lowerCAmelCase__="relu6" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=1_0 , lowerCAmelCase__=None , ) -> Dict:
'''simple docstring'''
a__ : int =parent
a__ : Optional[Any] =batch_size
a__ : Tuple =num_channels
a__ : Dict =image_size
a__ : Union[str, Any] =depth_multiplier
a__ : List[str] =min_depth
a__ : Dict =tf_padding
a__ : Any =int(last_hidden_size * depth_multiplier )
a__ : Tuple =output_stride
a__ : Optional[Any] =hidden_act
a__ : str =classifier_dropout_prob
a__ : int =use_labels
a__ : List[Any] =is_training
a__ : List[str] =num_labels
a__ : Dict =initializer_range
a__ : Tuple =scope
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : Union[str, Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : Any =None
a__ : List[Any] =None
if self.use_labels:
a__ : Dict =ids_tensor([self.batch_size] , self.num_labels )
a__ : Tuple =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a__ : Optional[Any] =self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowercase ( self ) -> str:
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] =MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : str =model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
a__ : Any =self.num_labels
a__ : Dict =MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Optional[int] =model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : str =self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ : List[str] =config_and_inputs
a__ : Tuple ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
_lowercase : List[str] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
_lowercase : Optional[int] = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : str = False
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[Any] =MobileNetVaModelTester(self )
a__ : Optional[Any] =MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ , a__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Optional[int] =model_class(lowerCAmelCase__ )
a__ : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] =[*signature.parameters.keys()]
a__ : Union[str, Any] =["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : int =model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
a__ : List[Any] =model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
a__ : Optional[int] =outputs.hidden_states
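# the tiny MobileNetV1 test config is expected to expose 26 hidden states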
a__ : Optional[int] =2_6
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
a__ , a__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Optional[int] =True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also works when enabled via the config
del inputs_dict["output_hidden_states"]
a__ : Union[str, Any] =True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> Any:
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : List[str] =MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _A ( ):
"""simple docstring"""
a__ : Optional[Any] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase):
@cached_property
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : str =MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(lowerCAmelCase__ )
a__ : Optional[Any] =self.default_image_processor
a__ : Optional[int] =prepare_img()
a__ : Optional[int] =image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
a__ : int =model(**lowerCAmelCase__ )
# verify the logits
a__ : str =torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
a__ : int =torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 148
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
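# the import structure below feeds _LazyModule, which defers the heavy torch imports
# until an attribute of this package is actually accessed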
_a = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 322
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo.
_a = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
_a = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = SavedModel()
__lowerCAmelCase: str = []
with open(os.path.join(SCREAMING_SNAKE_CASE , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
__lowerCAmelCase: List[str] = json.load(SCREAMING_SNAKE_CASE )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(SCREAMING_SNAKE_CASE )] )
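# onnx_ops now holds every op supported up to and including the requested opset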
with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
__lowerCAmelCase: Optional[int] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert the set of op names to a sorted list
__lowerCAmelCase: List[str] = sorted(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(SCREAMING_SNAKE_CASE )
if strict and len(SCREAMING_SNAKE_CASE ) > 0:
raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + '\n'.join(incompatible_ops) )
elif len(SCREAMING_SNAKE_CASE ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*SCREAMING_SNAKE_CASE , sep='\n' )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
_a = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 322
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def A_ ( ):
"""simple docstring"""
_a = ArgumentParser('''Diffusers CLI tool''', usage='''diffusers-cli <command> [<args>]''' )
_a = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
# Let's go
_a = parser.parse_args()
if not hasattr(_lowerCAmelCase, '''func''' ):
parser.print_help()
exit(1 )
# Run
_a = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 350
|
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=True , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> str:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_multiple_size
_a = hidden_act
_a = hidden_dropout
_a = attention_dropout
_a = weight_tying
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def _UpperCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a , _a , _a , _a = self.prepare_config_and_inputs()
_a = True
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_a = GPTNeoXJapaneseModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
_a = True
_a = GPTNeoXJapaneseModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
_a = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
_a = True
_a = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
_a = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
_a = output_from_no_past['''hidden_states'''][0]
_a = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self ) -> List[str]:
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : str = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
A_ : Tuple = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
A_ : List[str] = (
{'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
A_ : Any = False
A_ : Optional[Any] = False
A_ : Tuple = False
A_ : Optional[int] = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = GPTNeoXJapaneseModelTester(self )
_a = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> str:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
# This regression test was failing with PyTorch < 1.3
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs_for_decoder()
_a = None
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = '''abeja/gpt-neox-japanese-2.7b'''
_a = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
_a = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
_a = GPTNeoXJapaneseTokenizer.from_pretrained(__UpperCAmelCase )
_a = GPTNeoXJapaneseForCausalLM.from_pretrained(__UpperCAmelCase )
_a = []
for prompt in prompts:
_a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' ).input_ids
_a = model.generate(__UpperCAmelCase , max_length=50 )
_a = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 153
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
lowercase_ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def lowercase ( lowerCAmelCase__ : str ) -> str:
with open(lowerCAmelCase_ , '''rb''' ) as f:
__a = Image.open(lowerCAmelCase_ )
return im.convert('''RGB''' )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : Dict = field(
default=_lowerCamelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
__UpperCAmelCase : str = field(
default=_lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__UpperCAmelCase : Dict = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the training data.'} )
__UpperCAmelCase : Dict = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the validation data.'} )
__UpperCAmelCase : str = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__UpperCAmelCase : Dict = field(
default=_lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__UpperCAmelCase : Optional[int] = field(
default=_lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __UpperCAmelCase ( self ):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__UpperCAmelCase : Optional[Any] = field(
default=_lowerCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCamelCase )} , )
__UpperCAmelCase : str = field(
default=_lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCAmelCase : List[str] = field(
default=_lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__UpperCAmelCase : Any = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__UpperCAmelCase : int = field(default=_lowerCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__UpperCAmelCase : Union[str, Any] = field(
default=_lowerCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__UpperCAmelCase : int = field(
default=_lowerCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
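# collate function for the Trainer: stack the per-example image tensors and gather the labels into one batch dict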
def lowercase ( lowerCAmelCase__ : List[Any] ) -> List[Any]:
__a = torch.stack([example['''pixel_values'''] for example in examples] )
__a = torch.tensor([example['''labels'''] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def lowercase ( ) -> Tuple:
__a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''' , lowerCAmelCase_ , lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__a = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase_ )
transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__a = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
__a = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
else:
__a = {}
if data_args.train_dir is not None:
__a = os.path.join(data_args.train_dir , '''**''' )
if data_args.validation_dir is not None:
__a = os.path.join(data_args.validation_dir , '''**''' )
__a = load_dataset(
'''imagefolder''' , data_files=lowerCAmelCase_ , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
__a = None if """validation""" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCAmelCase_ ) and data_args.train_val_split > 0.0:
__a = dataset["""train"""].train_test_split(data_args.train_val_split )
__a = split["""train"""]
__a = split["""test"""]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__a = dataset["""train"""].features["""labels"""].names
__a = {}, {}
for i, label in enumerate(lowerCAmelCase_ ):
__a = str(lowerCAmelCase_ )
__a = label
# Load the accuracy metric from the datasets package
__a = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(lowerCAmelCase__ : Any ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
__a = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCAmelCase_ ) , labelaid=lowerCAmelCase_ , idalabel=lowerCAmelCase_ , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__a = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
__a = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__a = image_processor.size["""shortest_edge"""]
else:
__a = (image_processor.size["""height"""], image_processor.size["""width"""])
__a = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__a = Compose(
[
RandomResizedCrop(lowerCAmelCase_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__a = Compose(
[
Resize(lowerCAmelCase_ ),
CenterCrop(lowerCAmelCase_ ),
ToTensor(),
normalize,
] )
def train_transforms(lowerCAmelCase__ : str ):
__a = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(lowerCAmelCase__ : str ):
__a = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
__a = (
dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(lowerCAmelCase_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
__a = (
dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(lowerCAmelCase_ )
# Initialize our trainer
__a = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
__a = None
if training_args.resume_from_checkpoint is not None:
__a = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a = last_checkpoint
__a = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__a = trainer.evaluate()
trainer.log_metrics('''eval''' , lowerCAmelCase_ )
trainer.save_metrics('''eval''' , lowerCAmelCase_ )
# Write model card and (optionally) push to hub
__a = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """image-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""image-classification""", """vision"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase_ )
else:
trainer.create_model_card(**lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 45
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Any = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """markuplm"""
def __init__( self ,a_=30_522 ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=512 ,a_=2 ,a_=0.02 ,a_=1E-1_2 ,a_=0 ,a_=0 ,a_=2 ,a_=256 ,a_=1_024 ,a_=216 ,a_=1_001 ,a_=32 ,a_=50 ,a_="absolute" ,a_=True ,a_=None ,**a_ ,) -> Union[str, Any]:
super().__init__(
pad_token_id=a_ ,bos_token_id=a_ ,eos_token_id=a_ ,**a_ ,)
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Dict = num_attention_heads
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : Optional[Any] = intermediate_size
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : Dict = initializer_range
_UpperCAmelCase : List[Any] = layer_norm_eps
_UpperCAmelCase : Optional[Any] = position_embedding_type
_UpperCAmelCase : Any = use_cache
_UpperCAmelCase : List[Any] = classifier_dropout
# additional properties
_UpperCAmelCase : Dict = max_depth
_UpperCAmelCase : Union[str, Any] = max_xpath_tag_unit_embeddings
_UpperCAmelCase : Optional[int] = max_xpath_subs_unit_embeddings
_UpperCAmelCase : List[Any] = tag_pad_id
_UpperCAmelCase : Tuple = subs_pad_id
_UpperCAmelCase : List[str] = xpath_unit_hidden_size
| 215
| 0
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = MODEL_FOR_MASKED_LM_MAPPING
__SCREAMING_SNAKE_CASE = TF_MODEL_FOR_MASKED_LM_MAPPING
def UpperCamelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def UpperCamelCase ( self ):
A__ = pipeline(task='''fill-mask''',model='''sshleifer/tiny-distilroberta-base''',top_k=2,framework='''tf''' )
A__ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__a,decimals=6 ),[
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 3_8015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 2_5506, '''token_str''': ''' accuser'''},
],)
A__ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__a,decimals=6 ),[
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 3_8015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 2_5506,
'''token_str''': ''' accuser''',
},
],)
A__ = unmasker('''My name is <mask>''',targets=[''' Patrick''', ''' Clara''', ''' Teven'''],top_k=3 )
self.assertEqual(
nested_simplify(__a,decimals=6 ),[
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_3606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 2941, '''token_str''': ''' Te'''},
],)
@require_torch
def UpperCamelCase ( self ):
A__ = pipeline(task='''fill-mask''',model='''sshleifer/tiny-distilroberta-base''',top_k=2,framework='''pt''' )
A__ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__a,decimals=6 ),[
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 3_5676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 1_6416, '''token_str''': '''ELS'''},
],)
A__ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__a,decimals=6 ),[
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 3_5676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 1_6416, '''token_str''': '''ELS'''},
],)
A__ = unmasker('''My name is <mask>''',targets=[''' Patrick''', ''' Clara''', ''' Teven'''],top_k=3 )
self.assertEqual(
nested_simplify(__a,decimals=6 ),[
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_3606, '''token_str''': ''' Clara'''},
],)
A__ = unmasker('''My name is <mask> <mask>''',top_k=2 )
self.assertEqual(
nested_simplify(__a,decimals=6 ),[
[
{
'''score''': 2.2E-05,
'''token''': 3_5676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 1_6416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 3_5676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 1_6416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
],)
@require_torch_gpu
def UpperCamelCase ( self ):
A__ = pipeline('''fill-mask''',model='''hf-internal-testing/tiny-random-distilbert''',device=0,framework='''pt''' )
# convert model to fp16
pipe.model.half()
A__ = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(__a,__a )
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 1_2790,
                    "token_str": " Lyon",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 1_3606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        pipe = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        pipe.tokenizer.pad_token_id = None
        pipe.tokenizer.pad_token = None
        self.run_pipeline_test(pipe, [])
    @require_tf
    def test_model_no_pad_tf(self):
        pipe = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        pipe.tokenizer.pad_token_id = None
        pipe.tokenizer.pad_token = None
        self.run_pipeline_test(pipe, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(ValueError):
            fill_masker("This is")
        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs_two = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs_two,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs_two))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets_two = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets_two).issubset(targets):
            outputs_two = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets_two)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs_two))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
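# A minimal, hedged usage sketch of the fill-mask pipeline exercised by the tests
# above. The tiny checkpoint name mirrors the test cases; running this requires
# `transformers` installed and network access, so treat it as illustrative rather
# than part of the test suite itself.
if __name__ == "__main__":
    from transformers import pipeline as hf_pipeline
    unmasker = hf_pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
    # Each prediction is a dict with `sequence`, `score`, `token`, and `token_str` keys.
    for prediction in unmasker("My name is <mask>"):
        print(prediction["sequence"], prediction["score"])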
| 361
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 39
| 0
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = TaConfig(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False)
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)
        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        input_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(input_positions)
        x = self.dropout_pre(x)
        # invert the attention mask so padded positions are masked out
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
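# A hedged instantiation sketch for the encoder above. The hyperparameter values
# below are illustrative assumptions (not taken from a released checkpoint); the
# point is only to show the expected input and output shapes of `forward`.
if __name__ == "__main__":
    encoder = SpectrogramNotesEncoder(
        max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
        num_layers=2, num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
    )
    tokens = torch.randint(0, 1536, (1, 16))    # (batch, sequence) of note token ids
    mask = torch.ones(1, 16, dtype=torch.long)  # 1 = attend, 0 = padding
    hidden, out_mask = encoder(tokens, mask)
    print(hidden.shape)  # torch.Size([1, 16, 768])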
| 343
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 192
| 0
|
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Get the current weather for a location from the OpenWeatherMap API."""
    return requests.get(URL_BASE + "weather", params=locals()).json()
def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Get the weather forecast for a location from the OpenWeatherMap API."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()
def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Get combined weather data for coordinates from the OpenWeatherMap One Call API."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 78
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
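# A hedged usage sketch for the processor above. The public checkpoint name is an
# assumption used only for illustration; the snippet shows that a single call both
# tokenizes the text and preprocesses the image.
if __name__ == "__main__":
    from PIL import Image
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']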
| 78
| 1
|
"""simple docstring"""
import random
class Onepad:
    @staticmethod
    def encrypt(text):
        """Encrypt ``text`` as a list of numbers, using a fresh random key per character."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher, key):
        """Recover the plaintext from ``cipher`` using the matching ``key`` list."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
| 148
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 148
| 1
|
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1) -> complex:
    """Find a root of ``function`` using the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 367
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
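# A hedged illustration of how the renaming tables above are applied: a fairseq
# parameter name is matched against a MAPPING key, and a `*` wildcard in the mapped
# key is filled with the layer index parsed out of the original name. The sample
# name in the comment below is an assumption chosen purely to show the mechanics.
def _demo_map_name(name: str) -> str:
    for key, mapped_key in MAPPING.items():
        if key in name:
            mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            return mapped_key
    return name
# _demo_map_name("encoder.layers.3.self_attn.k_proj.weight")
# -> "wav2vec2.encoder.layers.3.attention.k_proj"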
def read_txt_into_dict(filename):
    """Read a label file (one label per line) into a line-number-to-label dict."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group")
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path)
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False)
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask)
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 24
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.0_2, layer_norm_eps=1E-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
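# A hedged usage sketch: constructing the config above with its defaults and one
# override. The printed values are the defaults defined in this file, not those of
# any released checkpoint.
if __name__ == "__main__":
    config = ASTConfig(num_mel_bins=64)
    print(config.hidden_size, config.num_mel_bins)  # 768 64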
| 327
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_a(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 1_00
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.floataa)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.floataa)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=1_00, output_type="np")
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(image, expected_image)
| 153
| 0
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' ,[
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] ,)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' ,[
DatasetInfo(),
DatasetInfo(
description='foo' ,features=Features({'a': Value('int32' )} ) ,builder_name='builder' ,config_name='config' ,version='1.0.0' ,splits=[{'name': 'train'}] ,download_size=42 ,),
] ,)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34)
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' ,[
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' ,features=Features({'a': Value('int32' )} ) ,builder_name='builder' ,config_name='config' ,version='1.0.0' ,splits=[{'name': 'train'}] ,download_size=42 ,)
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] ,)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
| 192
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
class ShapEImgaImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(F'''cuda:{gpu_id}''')
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
    def _execution_device(self):
        if self.device != torch.device('meta') or not hasattr(self.image_encoder, '_hf_hook'):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors='pt').pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image, num_images_per_prompt=1, num_inference_steps=25, generator=None, latents=None, guidance_scale=4.0, frame_size=64, output_type="pil", return_dict=True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}''')
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler)
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            images.append(
                self.renderer.decode(
                    latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128))
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''')
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
| 192
| 1
|
def is_automorphic_number(number: int) -> bool:
    """Return True if ``number``'s square ends in the digits of ``number``.

    >>> is_automorphic_number(5)
    True
    >>> is_automorphic_number(76)
    True
    >>> is_automorphic_number(7)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
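# An equivalent formulation of the digit loop above, sketched with modular
# arithmetic: number is automorphic exactly when number**2 is congruent to
# number modulo 10**k, where k is the number of decimal digits of number.
def is_automorphic_number_mod(number: int) -> bool:
    """
    >>> all(is_automorphic_number_mod(n) for n in (0, 1, 5, 6, 25, 76, 376, 625))
    True
    >>> is_automorphic_number_mod(7)
    False
    """
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 0:
        return False
    num_digits = len(str(number))
    return (number * number) % (10**num_digits) == number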
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """`Enum` specifying which verification checks to run on downloaded files."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            # hash in 1 MiB chunks to keep memory usage flat on large files
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
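# --- Illustration (not part of the original module): how verify_checksums
# reacts to a corrupted download. The URL key and checksum values are made up.
_expected = {"https://example.com/a.txt": {"num_bytes": 10, "checksum": "abc"}}
_recorded = {"https://example.com/a.txt": {"num_bytes": 10, "checksum": "xyz"}}
try:
    verify_checksums(_expected, _recorded, verification_name="dataset source files")
except NonMatchingChecksumError as err:
    print(err)  # Checksums didn't match for dataset source files: [...]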
| 39
| 0
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak the old checkpoint's weights into the current ProphetNet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # the old model stores query/key/value as one fused in_proj matrix
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
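# --- Shape sketch (not part of the conversion script): the special-key branch
# above slices fairseq's fused in_proj_weight of shape (3 * embed_dim, embed_dim)
# into separate query/key/value projections. Toy dimensions below.
import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # rows: [query | key | value]
query_w = in_proj_weight[:embed_dim, :]
key_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
value_w = in_proj_weight[2 * embed_dim :, :]
assert query_w.shape == key_w.shape == value_w.shape == (embed_dim, embed_dim)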
| 359
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
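# --- Usage sketch (hypothetical test, not part of the fixture module): how
# temporary_repo guarantees deletion of a test repo even if the test body fails.
# The repo name and the body of the test are illustrative assumptions.
def test_upload_roundtrip(temporary_repo, hf_api, hf_token):
    repo_id = f"{CI_HUB_USER}/roundtrip-{int(time.time() * 10e3)}"
    with temporary_repo(repo_id):
        hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
        # ... exercise upload/download code against repo_id here ...
    # on exiting the context manager, cleanup_repo deletes the repo, pass or fail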
| 221
| 0
|