code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = KandinskyImgaImgPipeline
lowerCamelCase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
lowerCamelCase = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
lowerCamelCase = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase = False
@property
def snake_case__ ( self : List[str] )-> Any:
'''simple docstring'''
return 3_2
@property
def snake_case__ ( self : int )-> Tuple:
'''simple docstring'''
return 3_2
@property
def snake_case__ ( self : Dict )-> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case__ ( self : Union[str, Any] )-> List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case__ ( self : Optional[int] )-> Optional[int]:
'''simple docstring'''
return 1_0_0
@property
def snake_case__ ( self : str )-> List[str]:
'''simple docstring'''
A__ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def snake_case__ ( self : Optional[int] )-> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = MCLIPConfig(
numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=3_7,num_attention_heads=4,num_hidden_layers=5,vocab_size=1_0_0_5,)
A__ = MultilingualCLIP(_a )
A__ = text_encoder.eval()
return text_encoder
@property
def snake_case__ ( self : Tuple )-> Dict:
'''simple docstring'''
torch.manual_seed(0 )
A__ = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
A__ = UNetaDConditionModel(**_a )
return model
@property
def snake_case__ ( self : str )-> Optional[int]:
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case__ ( self : Optional[int] )-> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A__ = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case__ ( self : str )-> Optional[Any]:
'''simple docstring'''
A__ = self.dummy_text_encoder
A__ = self.dummy_tokenizer
A__ = self.dummy_unet
A__ = self.dummy_movq
A__ = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
A__ = DDIMScheduler(**_a )
A__ = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def snake_case__ ( self : Any,lowercase_ : List[Any],lowercase_ : Union[str, Any]=0 )-> Dict:
'''simple docstring'''
A__ = floats_tensor((1, self.cross_attention_dim),rng=random.Random(_a ) ).to(_a )
A__ = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
A__ = floats_tensor((1, 3, 6_4, 6_4),rng=random.Random(_a ) ).to(_a )
A__ = image.cpu().permute(0,2,3,1 )[0]
A__ = Image.fromarray(np.uinta(_a ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
if str(_a ).startswith('mps' ):
A__ = torch.manual_seed(_a )
else:
A__ = torch.Generator(device=_a ).manual_seed(_a )
A__ = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def snake_case__ ( self : str )-> Optional[int]:
'''simple docstring'''
A__ = 'cpu'
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**_a )
A__ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A__ = pipe(**self.get_dummy_inputs(_a ) )
A__ = output.images
A__ = pipe(
**self.get_dummy_inputs(_a ),return_dict=_a,)[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
A__ = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : str )-> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : int )-> Dict:
'''simple docstring'''
A__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
A__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
A__ = 'A red cartoon frog, 4k'
A__ = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.floataa )
pipe_prior.to(_a )
A__ = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1',torch_dtype=torch.floataa )
A__ = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A__ = torch.Generator(device='cpu' ).manual_seed(0 )
A__ , A__ = pipe_prior(
_a,generator=_a,num_inference_steps=5,negative_prompt='',).to_tuple()
A__ = pipeline(
_a,image=_a,image_embeds=_a,negative_image_embeds=_a,generator=_a,num_inference_steps=1_0_0,height=7_6_8,width=7_6_8,strength=0.2,output_type='np',)
A__ = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a,_a )
| 7 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
lowerCAmelCase : int = None
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
lowerCAmelCase : Optional[int] = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
lowerCAmelCase : Union[str, Any] = """▁"""
# Segments (not really needed)
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[int] = 1
lowerCAmelCase : Tuple = 2
lowerCAmelCase : Optional[Any] = 3
lowerCAmelCase : List[Any] = 4
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = "left"
__UpperCamelCase = XLNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , )
lowerCamelCase = 3
lowerCamelCase = do_lower_case
lowerCamelCase = remove_space
lowerCamelCase = keep_accents
lowerCamelCase = vocab_file
lowerCamelCase = False if not self.vocab_file else True
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 291 | 0 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase__ : Any = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def a__ ( lowercase : str, lowercase : Tuple, lowercase : Any, lowercase : Any, lowercase : List[Any]=False, lowercase : str=True ) -> Optional[int]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
_UpperCamelCase = cached_file(snake_case__, snake_case__, force_download=not use_cached_models )
_UpperCamelCase = config_class.from_json_file(snake_case__ )
_UpperCamelCase = True
_UpperCamelCase = True
print(F"""Building TensorFlow model from configuration: {config}""" )
_UpperCamelCase = model_class(snake_case__ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
_UpperCamelCase = cached_file(
snake_case__, snake_case__, force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
_UpperCamelCase = load_pytorch_checkpoint_in_tfa_model(snake_case__, snake_case__ )
if compare_with_pt_model:
_UpperCamelCase = tf_model(tf_model.dummy_inputs, training=snake_case__ ) # build the network
_UpperCamelCase = torch.load(snake_case__, map_location='''cpu''' )
_UpperCamelCase = pt_model_class.from_pretrained(
pretrained_model_name_or_path=snake_case__, config=snake_case__, state_dict=snake_case__ )
with torch.no_grad():
_UpperCamelCase = pt_model(**pt_model.dummy_inputs )
_UpperCamelCase = pto[0].numpy()
_UpperCamelCase = tfo[0].numpy()
_UpperCamelCase = np.amax(np.abs(np_pt - np_tf ) )
print(F"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2e-2, F"""Error, model absolute difference is >2e-2: {diff}"""
# Save pytorch-model
print(F"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(snake_case__, save_format='''h5''' )
def a__ ( lowercase : Tuple, lowercase : Union[str, Any], lowercase : List[Any]=None, lowercase : Any=None, lowercase : str=False, lowercase : List[Any]=False, lowercase : Optional[Any]=False, lowercase : Any=False, ) -> List[str]:
"""simple docstring"""
if args_model_type is None:
_UpperCamelCase = list(MODEL_CLASSES.keys() )
else:
_UpperCamelCase = [args_model_type]
for j, model_type in enumerate(snake_case__, start=1 ):
print('''=''' * 100 )
print(F""" Converting model type {j}/{len(snake_case__ )}: {model_type}""" )
print('''=''' * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
_UpperCamelCase = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
_UpperCamelCase = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(snake_case__, snake_case__ ), start=1 ):
print('''-''' * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
_UpperCamelCase = model_shortcut_name
elif only_convert_finetuned_models:
print(F""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
F""" Converting checkpoint {i}/{len(snake_case__ )}: {model_shortcut_name} - model_type {model_type}""" )
print('''-''' * 100 )
if config_shortcut_name in aws_config_map:
_UpperCamelCase = cached_file(snake_case__, snake_case__, force_download=not use_cached_models )
else:
_UpperCamelCase = config_shortcut_name
if model_shortcut_name in aws_model_maps:
_UpperCamelCase = cached_file(snake_case__, snake_case__, force_download=not use_cached_models )
else:
_UpperCamelCase = model_shortcut_name
if os.path.isfile(snake_case__ ):
_UpperCamelCase = '''converted_model'''
convert_pt_checkpoint_to_tf(
model_type=snake_case__, pytorch_checkpoint_path=snake_case__, config_file=snake_case__, tf_dump_path=os.path.join(snake_case__, model_shortcut_name + '''-tf_model.h5''' ), compare_with_pt_model=snake_case__, )
if remove_cached_files:
os.remove(snake_case__ )
os.remove(snake_case__ )
if __name__ == "__main__":
lowercase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
lowercase__ : int = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 324 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _lowerCAmelCase ( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
lowerCamelCase = generator("""Something there""" )
self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
lowerCamelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
lowerCamelCase = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
lowerCamelCase = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
lowerCamelCase = 3
lowerCamelCase = generator(
"""Something there""" , num_return_sequences=_a , num_beams=_a , )
lowerCamelCase = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a , _a )
lowerCamelCase = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
self.assertEqual(
_a , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
lowerCamelCase = generator.model.config.eos_token_id
lowerCamelCase = """<pad>"""
lowerCamelCase = generator(
["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
self.assertEqual(
_a , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
lowerCamelCase = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
| 291 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( UpperCAmelCase__ ,UpperCAmelCase__ ):
__lowerCamelCase : Optional[int] = "dinat"
__lowerCamelCase : Tuple = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=64 , _lowerCAmelCase=[3, 4, 6, 5] , _lowerCAmelCase=[2, 4, 8, 16] , _lowerCAmelCase=7 , _lowerCAmelCase=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , _lowerCAmelCase=3.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.0 , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Any:
super().__init__(**_a )
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = embed_dim
_lowerCAmelCase = depths
_lowerCAmelCase = len(_a )
_lowerCAmelCase = num_heads
_lowerCAmelCase = kernel_size
_lowerCAmelCase = dilations
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = hidden_act
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase = int(embed_dim * 2 ** (len(_a ) - 1) )
_lowerCAmelCase = layer_scale_init_value
_lowerCAmelCase = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
| 158 |
"""simple docstring"""
def a__ ( snake_case__ , snake_case__ = False ) -> str:
if not isinstance(snake_case__ , snake_case__ ):
lowerCamelCase = F'Expected string as input, found {type(snake_case__ )}'
raise ValueError(snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
lowerCamelCase = F'Expected boolean as use_pascal parameter, found {type(snake_case__ )}'
raise ValueError(snake_case__ )
lowerCamelCase = input_str.split("""_""" )
lowerCamelCase = 0 if use_pascal else 1
lowerCamelCase = words[start_index:]
lowerCamelCase = [word[0].upper() + word[1:] for word in words_to_capitalize]
lowerCamelCase = """""" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 291 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Kandinsky 2.2 ControlNet image-to-image pipeline.

    The original block used one obfuscated name for every class attribute and
    method (so later definitions clobbered earlier ones) and referenced the
    undefined `_a`; the intended names are restored here. The class is also
    renamed so it no longer shadows the integration-test class below.
    """

    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    # Call arguments exercised by the common pipeline tests.
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """Tiny UNet configured for image+hint ("image_hint") conditioning."""
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        """Build the minimal {unet, scheduler, movq} component dict."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        return {"unet": unet, "scheduler": scheduler, "movq": movq}

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic dummy pipeline inputs on `device` for a given seed."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        """End-to-end fast check against a pinned output slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
    """GPU integration test comparing against reference outputs from the Hub.

    Restores the undefined obfuscated `_a` references of the original block and
    renames the class so it no longer shadows the fast-test class above.
    """

    def tearDown(self):
        # Free VRAM between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        # Convert the hint to a (1, C, H, W) float tensor in [0, 1].
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = "A robot, 4k photo"
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 209 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
# Module-level logger, following the library's `logger = logging.get_logger(__name__)` convention.
logger = logging.get_logger(__name__)
class __magic_name__(BaseImageProcessor):
    r"""Image processor applying resize -> center-crop -> rescale -> normalize,
    plus semantic-segmentation post-processing.

    Every method signature in the original block repeated the parameter name
    `_a` (a SyntaxError); the intended parameter names are restored here. The
    external entry points (`__init__` defaults, `preprocess`,
    `post_process_semantic_segmentation`) are unchanged.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults: shortest edge 256, center crop 224x224, ImageNet stats.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here resolves to the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured transform chain over one image or a batch."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        """Convert model logits to per-image segmentation maps.

        If ``target_sizes`` is given, each logits map is bilinearly resized to
        the matching target size before the per-pixel argmax.
        """
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 291 | 0 |
def bfs(graph, source, sink, parent):
    """Breadth-first search over the residual graph.

    Records each discovered vertex's predecessor in `parent` (so an augmenting
    path can be reconstructed) and returns True iff `sink` is reachable from
    `source` through edges with remaining capacity.
    """
    visited = [False] * len(graph)
    # deque gives O(1) popleft; the original list.pop(0) was O(n) per dequeue.
    queue = deque([source])
    visited[source] = True
    while queue:
        u = queue.popleft()
        for ind in range(len(graph[u])):
            if not visited[ind] and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from `source` to `sink` (Edmonds-Karp).

    `graph` is a capacity matrix; it is mutated in place to hold the residual
    capacities. Returns the value of the maximum flow.
    """
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Find the bottleneck capacity along the augmenting path.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path (and reverse edges).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
# Example capacity matrix (classic CLRS max-flow instance); expected output: 23.
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
# The original bound the tuple (0, 5) to a single obfuscated name while the
# call below used `source`/`sink`; restored to matching names.
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 222 |
"""simple docstring"""
import operator as op
lowerCAmelCase : Dict = """scaler.pt"""
lowerCAmelCase : Tuple = """pytorch_model"""
lowerCAmelCase : Union[str, Any] = """random_states"""
lowerCAmelCase : Union[str, Any] = """optimizer"""
lowerCAmelCase : Dict = """scheduler"""
lowerCAmelCase : int = """pytorch_model.bin"""
lowerCAmelCase : str = """pytorch_model.bin.index.json"""
lowerCAmelCase : Union[str, Any] = """model.safetensors"""
lowerCAmelCase : List[Any] = """model.safetensors.index.json"""
lowerCAmelCase : List[Any] = """1.10.2"""
lowerCAmelCase : Any = """py38"""
lowerCAmelCase : Optional[int] = """4.17.0"""
lowerCAmelCase : str = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
lowerCAmelCase : Tuple = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
lowerCAmelCase : List[Any] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
lowerCAmelCase : List[str] = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
lowerCAmelCase : List[str] = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
lowerCAmelCase : Any = """2.0.1"""
lowerCAmelCase : List[Any] = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
lowerCAmelCase : Union[str, Any] = ["""default""", """reduce-overhead""", """max-autotune"""]
lowerCAmelCase : Optional[int] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCAmelCase : Union[str, Any] = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
lowerCAmelCase : List[str] = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
lowerCAmelCase : Optional[Any] = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 291 | 0 |
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowercase_ = None
lowercase_ = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowercase_ = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class snake_case:
    """Image feature storing images as ``{"bytes": binary, "path": string}`` structs.

    The original block named every field/method with the same obfuscated
    identifier (later methods clobbered earlier ones) and referenced the
    undefined `_a`; the intended field and method names are restored here.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode a str path / raw bytes / ndarray / PIL image / dict into the storage struct."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a storage struct back into a loaded PIL image."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        # Resolve a per-repo auth token for Hub-hosted files.
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten to raw value features when decoding is disabled."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast string/binary/struct/list Arrow storage to the Image struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # Lists of pixel values: encode each array to image bytes.
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed file contents: replace local paths with the file bytes."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    """Return (and cache) the PIL formats that support both open and save.

    Renamed from the obfuscated `lowerCAmelCase` to the name its call sites use.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image) -> bytes:
    """Serialize a PIL image to bytes, reusing its native format when possible.

    Falls back to PNG for simple modes and TIFF otherwise.
    """
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image) -> dict:
    """Encode a PIL image as a {"path", "bytes"} dict, preferring the on-disk path."""
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as image bytes, downcasting to a Pillow-supported dtype if needed."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs):
    """Encode a list of image-like objects (paths, ndarrays, PIL images) to storage dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        # Use the first non-null item to decide how to encode the whole list.
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 266 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """Builds tiny ViTMSN configs/inputs for the common model tests.

    Renamed from the obfuscated `__magic_name__` to the name the test class
    below instantiates; duplicate `_a` parameter names (a SyntaxError) and
    undefined references are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # Original prints lacked the `f` prefix, so the placeholders never interpolated.
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ViTMSN; renamed from the obfuscated class name
    and restored undefined `_a` references."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration test.

    Renamed from the obfuscated `a__` to the name the integration test calls.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained facebook/vit-msn-small checkpoint."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 291 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
# Type aliases for the distance functions below. Restored: both aliases were
# assigned to the same collapsed name, and ``np.floataa`` does not exist
# (AttributeError at import time) -- the intended type is ``np.float64``.
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Return the Euclidean distance between two vectors using NumPy.

    >>> euclidean_distance([0, 0], [3, 4])
    5.0
    """
    # Restored: parameters were collapsed and the body referenced the undefined
    # ``snake_case__``; the benchmark below calls this as ``euclidean_distance``.
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Return the Euclidean distance between two vectors using pure Python.

    >>> euclidean_distance_no_np([0, 0], [3, 4])
    5.0
    """
    # Restored name per the benchmark call site; params were undefined before.
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":

    def benchmark() -> None:
        """Time the NumPy and pure-Python implementations side by side."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    # Restored: the function was defined under a collapsed name while the
    # call referenced the undefined ``benchmark``.
    benchmark()
| 339 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Return the relative-position-bias parameters of layer ``i``. Does not transpose.

    Restored name per the call sites in ``convert_tax_to_pytorch``.
    """
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) attention kernels of layer ``i``, each reshaped
    to 2D by merging the head dimensions. Does not transpose.

    Restored: the original signature had duplicate parameter names (a
    SyntaxError) and the temporaries were collapsed to one name.
    """
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the MLP parameters of layer ``i``. Does not transpose.

    When ``split_mlp_wi`` is True (v1.1 gated-GeLU checkpoints), ``wi`` is a
    ``(wi_0, wi_1)`` tuple; otherwise it is a single kernel.
    """
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale vector of layer ``i``."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Convert the parameters from the T5X/Flax nested layout to the flat
    Hugging Face PyTorch state-dict layout (values stay as numpy arrays).

    NOTE(review): the obfuscated original bound every converted tensor to a
    single collapsed local instead of a state-dict key; the standard T5
    state-dict keys are restored here -- verify against the upstream
    conversion script before shipping.
    """
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        # a single shared relative attention bias on block 0
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Build a PyTorch state dict from the converted (numpy) parameters,
    filling in embedding/lm-head weights that share the token embeddings."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replace ``model``'s state dict with the parameters from a T5X checkpoint."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    # strict=True so any missing/unexpected key fails the conversion loudly
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Convert a T5X checkpoint into a saved PyTorch checkpoint at ``pytorch_dump_path``."""
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
    )
    parser.add_argument(
        """--scalable_attention""",
        action="""store_true""",
        help="""Whether the model uses scaled attention (umt5 model)""",
        default=False,
    )
    args = parser.parse_args()
    # argparse stores "--t5x_checkpoint_path" as args.t5x_checkpoint_path;
    # the original read the non-existent attribute args.tax_checkpoint_path.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 291 | 0 |
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate that ``device_map`` assigns every attention block exactly once.

    Args:
        device_map: mapping of device id -> list of attention-block indices.
        num_blocks: total number of attention blocks in the model.

    Raises:
        ValueError: if any block is duplicated, missing, or out of range.

    Restored: the original signature had duplicate parameter names
    (SyntaxError) and referenced undefined collapsed locals.
    """
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
            ''' These attention blocks were specified more than once: ''' + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            '''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
            '''blocks to a device on the device_map: ''' + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            '''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
            + str(extra_blocks)
        )
def get_device_map(n_layers, devices):
    """Evenly split ``n_layers`` consecutive layer indices across ``devices``.

    Returns a dict mapping each device to its contiguous slice of layers.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
| 110 |
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list, item) -> bool:
    """Return True if ``item`` occurs in the sorted list ``a_list``.

    Recursive bisection; an empty list returns False. Restored name: the
    recursive call and the main guard both reference ``binary_search``.
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    # Restored: the original bound all four values to one collapsed name and
    # then referenced the undefined ``sequence``/``target``/``not_str``.
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    sequence = [int(item.strip()) for item in user_input.split(""",""")]
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    not_str = """""" if binary_search(sequence, target) else """not """
    print(f"""{target} was {not_str}found in {sequence}""")
| 291 | 0 |
from __future__ import annotations
def lowerCamelCase__(matrix):
    """Return the minimum path sum from the top-left to the bottom-right of
    ``matrix``, moving only right or down. ``matrix`` is modified in place.

    Restored: the parameter was named ``A__`` while the body used the
    undefined ``matrix`` and ``snake_case__``.

    >>> lowerCamelCase__([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 12 |
"""simple docstring"""
def circle_sort(collection) -> list:
    """Sort ``collection`` in place using circle sort and return it.

    Restored: the nested helper had duplicate parameter names (SyntaxError)
    and the swap statements were collapsed into single-name assignments; the
    main guard calls this as ``circle_sort``.

    >>> circle_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high) -> bool:
        """One circle-sort pass on collection[low..high]; True if any swap occurred."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        # odd-length segment: compare the middle element with its neighbour
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
    # Restored names: the original bound both values to one collapsed name.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(circle_sort(unsorted))
| 291 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module wiring for MMBT. Restored: the original reused one collapsed
# name (``__a``) for the import structure, the torch-only list, and the lazy
# module itself, which overwrote the structure and discarded the
# ``sys.modules`` registration.
_import_structure = {"""configuration_mmbt""": ["""MMBTConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_mmbt"""] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers indefinitely: 1, 2, 3, 5, 8, ...

    Restored: the tuple unpacks were collapsed into single-name assignments
    and the call site below references ``fibonacci_generator``.
    """
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci term with ``n`` digits
    (Project Euler problem 25).

    >>> solution(3)
    12
    """
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 291 | 0 |
import cmath
import math
def lowercase(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power in a single-phase AC circuit as the
    product of the voltage and current phasors (angles in degrees).

    Restored: the original signature had duplicate parameter names
    (SyntaxError) and the body referenced the undefined ``snake_case__``.

    >>> lowercase(100, 5, 0, 0)
    (500+0j)
    """
    # Convert the angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 317 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Restored: the class below calls ``logger.warning``; the original bound the
# logger to a collapsed name.
logger = logging.get_logger(__name__)
class __magic_name__ ( UpperCAmelCase__ ):
    """
    TVLT-style audio feature extractor: converts raw mono speech into padded
    log-mel spectrogram patches plus an attention mask.

    NOTE(review): restored from obfuscation-damaged source in which all
    parameters were collapsed to ``_a`` (a SyntaxError) and every attribute
    assignment was collapsed to one local name; attribute names were
    recovered from their later ``self.*`` read sites.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],  # kept mutable for signature compatibility; never mutated
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # number of frequency patches per time step
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22_050.0,
            sampling_rate=sampling_rate,
            norm="""slaney""",
            mel_scale="""slaney""",
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute a normalized log-mel spectrogram (values scaled into [-1, 1])."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, """hann""" ),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="""dB""",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into ``audio_values``
        (padded log-mel patches) and, optionally, ``audio_mask``."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            data = {"""audio_values""": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 291 | 0 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Restored: the original reused one collapsed name for both the logger and
# the archive map, so the second assignment clobbered the logger that the
# config classes below call via ``logger.warning``/``logger.info``.
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
    """google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
    """google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class OwlViTTextConfig(PretrainedConfig):
    """Configuration for the OwlViT text encoder.

    Restored name: the composite config below instantiates
    ``OwlViTTextConfig(**text_config)``, proving the intended class name.
    """

    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=4_9_4_0_8,
        hidden_size=5_1_2,
        intermediate_size=2_0_4_8,
        num_hidden_layers=1_2,
        num_attention_heads=8,
        max_position_embeddings=1_6,
        hidden_act="quick_gelu",
        layer_norm_eps=1E-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=4_9_4_0_6,
        eos_token_id=4_9_4_0_7,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a composite owlvit config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type' ) == "owlvit":
            config_dict = config_dict['text_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    """Configuration for the OwlViT vision encoder.

    Restored name: the composite config below instantiates
    ``OwlViTVisionConfig(**vision_config)``.
    """

    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=7_6_8,
        intermediate_size=3_0_7_2,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        num_channels=3,
        image_size=7_6_8,
        patch_size=3_2,
        hidden_act="quick_gelu",
        layer_norm_eps=1E-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a composite owlvit config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type' ) == "owlvit":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    """Composite configuration combining the OwlViT text and vision sub-configs."""

    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=5_1_2,
        logit_scale_init_value=2.6_592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.' )

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the composite config from a pretrained checkpoint."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-built text/vision config dicts."""
        config_dict = {}
        config_dict['text_config'] = text_config
        config_dict['vision_config'] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for OwlViT (inputs/outputs, tolerance, dummies)."""

    @property
    def inputs(self):
        """Axis names for the exported graph inputs."""
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('attention_mask', {0: 'batch', 1: 'sequence'}),
            ] )

    @property
    def outputs(self):
        """Axis names for the exported graph outputs."""
        return OrderedDict(
            [
                ('logits_per_image', {0: 'batch'}),
                ('logits_per_text', {0: 'batch'}),
                ('text_embeds', {0: 'batch'}),
                ('image_embeds', {0: 'batch'}),
            ] )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1E-4

    def generate_dummy_inputs(self, processor, batch_size = -1, seq_length = -1, framework = None):
        """Generate dummy text + image inputs via the processor's sub-components."""
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self):
        """Minimum ONNX opset version required by the export."""
        return 1_4
| 7 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate that ``device_map`` assigns every attention block exactly once.

    Args:
        device_map: mapping of device id -> list of attention-block indices.
        num_blocks: total number of attention blocks in the model.

    Raises:
        ValueError: if any block is duplicated, missing, or out of range.
    """
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks)
        )
def get_device_map(n_layers, devices):
    """Evenly split ``n_layers`` consecutive layer indices across ``devices``.

    Returns a dict mapping each device to its contiguous slice of layers.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
| 291 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput):
    """
    Output of the 1D UNet's forward pass.

    Restored: the obfuscated source had `_snake_case : List[str] = 4_2` where
    the annotation-only output field belongs; the forward pass constructs
    this class with a ``sample`` keyword, proving the field name.
    """

    # Hidden states from the last layer, shape (batch_size, num_channels, sample_size).
    sample: torch.FloatTensor
class __lowerCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : List[str] , lowerCAmelCase__ : Optional[int] = 65536 , lowerCAmelCase__ : Optional[Any] = None , lowerCAmelCase__ : Dict = 2 , lowerCAmelCase__ : Optional[Any] = 2 , lowerCAmelCase__ : List[Any] = 0 , lowerCAmelCase__ : str = "fourier" , lowerCAmelCase__ : str = True , lowerCAmelCase__ : str = False , lowerCAmelCase__ : List[Any] = 0.0 , lowerCAmelCase__ : Union[str, Any] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCAmelCase__ : str = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCAmelCase__ : int = "UNetMidBlock1D" , lowerCAmelCase__ : Optional[Any] = None , lowerCAmelCase__ : List[Any] = (32, 32, 64) , lowerCAmelCase__ : List[Any] = None , lowerCAmelCase__ : Union[str, Any] = 8 , lowerCAmelCase__ : Optional[Any] = 1 , lowerCAmelCase__ : List[str] = False , ) -> List[str]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_UpperCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_a , log=_a , flip_sin_to_cos=_a )
_UpperCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_UpperCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_a , downscale_freq_shift=_a )
_UpperCamelCase = block_out_channels[0]
if use_timestep_embedding:
_UpperCamelCase = block_out_channels[0] * 4
_UpperCamelCase = TimestepEmbedding(
in_channels=_a , time_embed_dim=_a , act_fn=_a , out_dim=block_out_channels[0] , )
_UpperCamelCase = nn.ModuleList([] )
_UpperCamelCase = None
_UpperCamelCase = nn.ModuleList([] )
_UpperCamelCase = None
# down
_UpperCamelCase = in_channels
for i, down_block_type in enumerate(_a ):
_UpperCamelCase = output_channel
_UpperCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_UpperCamelCase = i == len(_a ) - 1
_UpperCamelCase = get_down_block(
_a , num_layers=_a , in_channels=_a , out_channels=_a , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_a )
# mid
_UpperCamelCase = get_mid_block(
_a , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_a , add_downsample=_a , )
# up
_UpperCamelCase = list(reversed(_a ) )
_UpperCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_UpperCamelCase = out_channels
else:
_UpperCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(_a ):
_UpperCamelCase = output_channel
_UpperCamelCase = (
reversed_block_out_channels[i + 1] if i < len(_a ) - 1 else final_upsample_channels
)
_UpperCamelCase = i == len(_a ) - 1
_UpperCamelCase = get_up_block(
_a , num_layers=_a , in_channels=_a , out_channels=_a , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_a )
_UpperCamelCase = output_channel
# out
_UpperCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_UpperCamelCase = get_out_block(
out_block_type=_a , num_groups_out=_a , embed_dim=block_out_channels[0] , out_channels=_a , act_fn=_a , fc_dim=block_out_channels[-1] // 4 , )
    def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple = True , ) -> Tuple:
        """Run the 1D UNet: embed the timestep, then down -> mid -> up -> out.

        NOTE(review): the body reads ``sample``, ``timestep``/``timesteps`` and
        ``return_dict`` while the signature declares ``lowerCAmelCase__`` three
        times — the names look mangled by obfuscation; confirm against the
        original source before relying on this method.
        """
        # 1. time: normalise the timestep to a 1-element long tensor on the
        # sample's device before projecting it.
        _UpperCamelCase = timestep
        if not torch.is_tensor(_a ):
            _UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
            _UpperCamelCase = timesteps[None].to(sample.device )
        _UpperCamelCase = self.time_proj(_a )
        if self.config.use_timestep_embedding:
            _UpperCamelCase = self.time_mlp(_a )
        else:
            # No MLP: broadcast the raw sinusoidal projection across the
            # sequence dimension instead.
            _UpperCamelCase = timestep_embed[..., None]
            _UpperCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            _UpperCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
        # 2. down: collect residuals for the skip connections
        _UpperCamelCase = ()
        for downsample_block in self.down_blocks:
            _UpperCamelCase , _UpperCamelCase = downsample_block(hidden_states=_a , temb=_a )
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            _UpperCamelCase = self.mid_block(_a , _a )
        # 4. up: consume the residual stack from the end, one slice per block
        for i, upsample_block in enumerate(self.up_blocks ):
            _UpperCamelCase = down_block_res_samples[-1:]
            _UpperCamelCase = down_block_res_samples[:-1]
            _UpperCamelCase = upsample_block(_a , res_hidden_states_tuple=_a , temb=_a )
        # 5. post-process
        if self.out_block:
            _UpperCamelCase = self.out_block(_a , _a )
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=_a )
| 324 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __magic_name__ ( unittest.TestCase ):
    """Model tester: builds a tiny RoFormer config plus random inputs.

    NOTE(review): the constructor declares ``_a`` repeatedly while the body
    reads ``parent``, ``batch_size`` etc. — parameter names look mangled by
    obfuscation; confirm against the original source.
    """

    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ):
        """Store the hyper-parameters used to build the tiny test model."""
        lowerCamelCase = parent
        lowerCamelCase = batch_size
        lowerCamelCase = seq_length
        lowerCamelCase = is_training
        lowerCamelCase = use_attention_mask
        lowerCamelCase = use_token_type_ids
        lowerCamelCase = use_labels
        lowerCamelCase = vocab_size
        lowerCamelCase = hidden_size
        lowerCamelCase = num_hidden_layers
        lowerCamelCase = num_attention_heads
        lowerCamelCase = intermediate_size
        lowerCamelCase = hidden_act
        lowerCamelCase = hidden_dropout_prob
        lowerCamelCase = attention_probs_dropout_prob
        lowerCamelCase = max_position_embeddings
        lowerCamelCase = type_vocab_size
        lowerCamelCase = type_sequence_label_size
        lowerCamelCase = initializer_range
        lowerCamelCase = num_choices

    def _lowerCAmelCase ( self ):
        """Create a RoFormerConfig plus random input_ids / masks / type ids."""
        lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase = None
        if self.use_attention_mask:
            lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase = None
        if self.use_token_type_ids:
            lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def _lowerCAmelCase ( self ):
        """Repackage the prepared config and inputs as a kwargs dict."""
        lowerCamelCase = self.prepare_config_and_inputs()
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
        lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ):
    """Common Flax model-test suite applied to every RoFormer head class.

    NOTE(review): ``from_pretrained(... , from_pt=_a )`` passes ``_a`` which is
    unbound here — name mangled by obfuscation; confirm the intended flag.
    """

    # Run gradient-checkpointing variants of the common tests as well.
    __UpperCamelCase = True
    # All Flax RoFormer heads under test (empty when Flax is unavailable).
    __UpperCamelCase = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _lowerCAmelCase ( self ):
        """Instantiate the shared model tester used by the common tests."""
        lowerCamelCase = FlaxRoFormerModelTester(self )

    @slow
    def _lowerCAmelCase ( self ):
        """Smoke-test loading each head from the Hub and running a 1x1 input."""
        for model_class_name in self.all_model_classes:
            lowerCamelCase = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_a )
            lowerCamelCase = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_a )
@require_flax
class __magic_name__ ( unittest.TestCase ):
    """Integration test: checks exact logits of the pretrained MLM head.

    NOTE(review): ``model(_a )`` reads the unbound name ``_a`` where the
    tokenized ``input_ids`` were presumably intended — mangled by obfuscation.
    """

    @slow
    def _lowerCAmelCase ( self ):
        """Compare a 3x3 logits slice against hard-coded reference values."""
        lowerCamelCase = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
        lowerCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase = model(_a )[0]
        lowerCamelCase = 50_000
        lowerCamelCase = (1, 6, vocab_size)
        self.assertEqual(output.shape , _a )
        # Reference slice recorded from the original PyTorch checkpoint.
        lowerCamelCase = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 291 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class lowerCAmelCase_ ( UpperCAmelCase__ ):
    """Abstract base class for beam-search generation constraints.

    NOTE(review): bodies reference the unbound name ``_a`` and locals such as
    ``completed``/``counter`` that the obfuscated assignments never bind —
    confirm against the original transformers source.
    """

    def __init__( self ) -> List[Any]:
        # Run the self-consistency check on construction.
        self.test()

    def _snake_case ( self ) -> Union[str, Any]:
        """Sanity-check that advance()/update() make progress and terminate."""
        _lowerCAmelCase = 0
        _lowerCAmelCase = False
        while not completed:
            if counter == 1:
                self.reset()
            _lowerCAmelCase = self.advance()
            if not self.does_advance(_a ):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.update(_a )
            counter += 1
            if counter > 10000:
                # Guard against constraints that never complete.
                raise Exception("update() does not fulfill the constraint." )
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly." )

    @abstractmethod
    def _snake_case ( self ) -> List[Any]:
        # advance(): return the token(s) that would move the constraint forward.
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def _snake_case ( self , _lowerCAmelCase ) -> Tuple:
        # does_advance(token_id): would this token make progress?
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def _snake_case ( self , _lowerCAmelCase ) -> List[str]:
        # update(token_id): consume a token, returning (stepped, completed, reset).
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def _snake_case ( self ) -> Tuple:
        # reset(): discard all progress made so far.
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def _snake_case ( self ) -> Dict:
        # remaining(): number of steps still needed to fulfill the constraint.
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def _snake_case ( self , _lowerCAmelCase=False ) -> Tuple:
        # copy(stateful=False): duplicate, optionally including progress state.
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowerCAmelCase_ ( UpperCAmelCase__ ):
    """Constraint that forces a fixed phrase (ordered list of token ids).

    NOTE(review): signatures declare ``_lowerCAmelCase`` while bodies read
    ``token_ids``/``token_id``/``stateful`` and the unbound ``_a`` — names
    mangled by obfuscation; confirm against the original source.
    """

    def __init__( self , _lowerCAmelCase ) -> List[str]:
        """Validate and store the token-id phrase to be generated."""
        super(_a , self ).__init__()
        if not isinstance(_a , _a ) or len(_a ) == 0:
            raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(_a , _a ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
        _lowerCAmelCase = token_ids
        _lowerCAmelCase = len(self.token_ids )
        _lowerCAmelCase = -1  # the index of the currently fulfilled step
        _lowerCAmelCase = False

    def _snake_case ( self ) -> List[Any]:
        """Return the next token of the phrase (None once complete)."""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def _snake_case ( self , _lowerCAmelCase ) -> Tuple:
        """True when ``token_id`` is the next expected token of the phrase."""
        if not isinstance(_a , _a ):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_a )}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def _snake_case ( self , _lowerCAmelCase ) -> Dict:
        """Consume a token; return (stepped, completed, reset)."""
        if not isinstance(_a , _a ):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_a )}''' )
        _lowerCAmelCase = False
        _lowerCAmelCase = False
        _lowerCAmelCase = False
        if self.does_advance(_a ):
            self.fulfilled_idx += 1
            _lowerCAmelCase = True
            if self.fulfilled_idx == (self.seqlen - 1):
                _lowerCAmelCase = True
            _lowerCAmelCase = completed
        else:
            # failed to make progress.
            _lowerCAmelCase = True
            self.reset()
        return stepped, completed, reset

    def _snake_case ( self ) -> List[Any]:
        """Discard all progress."""
        _lowerCAmelCase = False
        _lowerCAmelCase = 0

    def _snake_case ( self ) -> int:
        """Number of phrase tokens still to be generated."""
        return self.seqlen - (self.fulfilled_idx + 1)

    def _snake_case ( self , _lowerCAmelCase=False ) -> Tuple:
        """Duplicate the constraint, optionally copying progress state too."""
        _lowerCAmelCase = PhrasalConstraint(self.token_ids )
        if stateful:
            _lowerCAmelCase = self.seqlen
            _lowerCAmelCase = self.fulfilled_idx
            _lowerCAmelCase = self.completed
        return new_constraint
class lowerCAmelCase_ :
    """Trie over several token-id sequences, used by DisjunctiveConstraint.

    NOTE(review): signatures declare ``_lowerCAmelCase`` while bodies read
    ``nested_token_ids``/``root``/``level`` and the unbound ``_a`` — names
    mangled by obfuscation; confirm against the original source.
    """

    def __init__( self , _lowerCAmelCase , _lowerCAmelCase=True ) -> Dict:
        """Build the trie; with ``no_subsets`` reject phrases that are prefixes of others."""
        _lowerCAmelCase = max([len(_a ) for one in nested_token_ids] )
        _lowerCAmelCase = {}
        for token_ids in nested_token_ids:
            _lowerCAmelCase = root
            for tidx, token_id in enumerate(_a ):
                if token_id not in level:
                    _lowerCAmelCase = {}
                _lowerCAmelCase = level[token_id]
        if no_subsets and self.has_subsets(_a , _a ):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f''' {nested_token_ids}.''' )
        _lowerCAmelCase = root

    def _snake_case ( self , _lowerCAmelCase ) -> Union[str, Any]:
        """Tokens reachable after consuming ``current_seq`` from the root."""
        _lowerCAmelCase = self.trie
        for current_token in current_seq:
            _lowerCAmelCase = start[current_token]
        _lowerCAmelCase = list(start.keys() )
        return next_tokens

    def _snake_case ( self , _lowerCAmelCase ) -> Tuple:
        """True when ``current_seq`` ends at a leaf (a complete phrase)."""
        _lowerCAmelCase = self.next_tokens(_a )
        return len(_a ) == 0

    def _snake_case ( self , _lowerCAmelCase ) -> Dict:
        """Recursively count the leaves under ``root`` (distinct phrases)."""
        _lowerCAmelCase = list(root.values() )
        if len(_a ) == 0:
            return 1
        else:
            return sum([self.count_leaves(_a ) for nn in next_nodes] )

    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
        """True when some phrase is a strict prefix of another (leaf count mismatch)."""
        _lowerCAmelCase = self.count_leaves(_a )
        return len(_a ) != leaf_count
class lowerCAmelCase_ ( UpperCAmelCase__ ):
    """Constraint fulfilled by generating ANY one of several token phrases.

    NOTE(review): signatures declare ``_lowerCAmelCase`` while bodies read
    ``nested_token_ids``/``token_id`` and the unbound ``_a`` — names mangled
    by obfuscation; confirm against the original source.
    """

    def __init__( self , _lowerCAmelCase ) -> Tuple:
        """Validate the nested phrase list and build the backing trie."""
        super(_a , self ).__init__()
        if not isinstance(_a , _a ) or len(_a ) == 0:
            raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(_a , _a ) for token_ids in nested_token_ids ):
            raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(_a , _a ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
        _lowerCAmelCase = DisjunctiveTrie(_a )
        _lowerCAmelCase = nested_token_ids
        _lowerCAmelCase = self.trie.max_height
        _lowerCAmelCase = []
        _lowerCAmelCase = False

    def _snake_case ( self ) -> List[Any]:
        """Return the list of tokens that would advance any phrase (None if done)."""
        _lowerCAmelCase = self.trie.next_tokens(self.current_seq )
        if len(_a ) == 0:
            return None
        else:
            return token_list

    def _snake_case ( self , _lowerCAmelCase ) -> Tuple:
        """True when ``token_id`` continues at least one candidate phrase."""
        if not isinstance(_a , _a ):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_a )}''' )
        _lowerCAmelCase = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens

    def _snake_case ( self , _lowerCAmelCase ) -> str:
        """Consume a token; return (stepped, completed, reset)."""
        if not isinstance(_a , _a ):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_a )}''' )
        _lowerCAmelCase = False
        _lowerCAmelCase = False
        _lowerCAmelCase = False
        if self.does_advance(_a ):
            self.current_seq.append(_a )
            _lowerCAmelCase = True
        else:
            _lowerCAmelCase = True
            self.reset()
        _lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
        _lowerCAmelCase = completed
        return stepped, completed, reset

    def _snake_case ( self ) -> int:
        """Discard all progress."""
        _lowerCAmelCase = False
        _lowerCAmelCase = []

    def _snake_case ( self ) -> Union[str, Any]:
        """Steps still needed (0 once any phrase has been completed)."""
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )

    def _snake_case ( self , _lowerCAmelCase=False ) -> str:
        """Duplicate the constraint, optionally copying progress state too."""
        _lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
        if stateful:
            _lowerCAmelCase = self.seqlen
            _lowerCAmelCase = self.current_seq
            _lowerCAmelCase = self.completed
        return new_constraint
class lowerCAmelCase_ :
    """Tracks the progress of a whole list of constraints during beam search.

    NOTE(review): bodies read ``constraints``/``token_id``/``complete`` etc.
    and the unbound ``_a`` while the obfuscated assignments never bind them —
    confirm against the original transformers source before relying on this.
    """

    def __init__( self , _lowerCAmelCase ) -> Optional[Any]:
        """Store the constraint list and initialise the progress state."""
        _lowerCAmelCase = constraints
        # max # of steps required to fulfill a given constraint
        _lowerCAmelCase = max([c.seqlen for c in constraints] )
        _lowerCAmelCase = len(_a )
        _lowerCAmelCase = False
        self.init_state()

    def _snake_case ( self ) -> List[Any]:
        """Reset: no constraints complete, none in progress, all pending."""
        _lowerCAmelCase = []
        _lowerCAmelCase = None
        _lowerCAmelCase = [constraint.copy(stateful=_a ) for constraint in self.constraints]

    def _snake_case ( self ) -> Optional[int]:
        """Score the state: full credit per complete constraint plus partial credit."""
        _lowerCAmelCase = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add

    def _snake_case ( self ) -> Optional[Any]:
        """Collect every token that would advance some constraint (None if none)."""
        _lowerCAmelCase = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                _lowerCAmelCase = constraint.advance()
                if isinstance(_a , _a ):
                    token_list.append(_a )
                elif isinstance(_a , _a ):
                    token_list.extend(_a )
        else:
            _lowerCAmelCase = self.inprogress_constraint.advance()
            if isinstance(_a , _a ):
                token_list.append(_a )
            elif isinstance(_a , _a ):
                token_list.extend(_a )
        if len(_a ) == 0:
            return None
        else:
            return token_list

    def _snake_case ( self , _lowerCAmelCase ) -> int:
        """Replay an already-generated token sequence into a fresh state."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                _lowerCAmelCase , _lowerCAmelCase = self.add(_a )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def _snake_case ( self , _lowerCAmelCase ) -> Union[str, Any]:
        """Feed one token into the state; return (complete, stepped)."""
        if not isinstance(_a , _a ):
            raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
        _lowerCAmelCase , _lowerCAmelCase = False, False
        if self.completed:
            _lowerCAmelCase = True
            _lowerCAmelCase = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.inprogress_constraint.update(_a )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_a ) )
                _lowerCAmelCase = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                _lowerCAmelCase = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    _lowerCAmelCase = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(_a ):
                    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = pending_constraint.update(_a )
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true." )
                    if complete:
                        self.complete_constraints.append(_a )
                        _lowerCAmelCase = None
                    if not complete and stepped:
                        _lowerCAmelCase = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        _lowerCAmelCase = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            _lowerCAmelCase = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def _snake_case ( self , _lowerCAmelCase=True ) -> Union[str, Any]:
        """Copy the whole state, optionally preserving per-constraint progress."""
        _lowerCAmelCase = ConstraintListState(self.constraints )  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            _lowerCAmelCase = [
                constraint.copy(stateful=_a ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                _lowerCAmelCase = self.inprogress_constraint.copy(stateful=_a )
            _lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 158 |
"""simple docstring"""
from typing import Any
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> list:
_validation(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
# Creates data structures and fill initial step
lowerCamelCase = {}
lowerCamelCase = {}
for state in states_space:
lowerCamelCase = observations_space[0]
lowerCamelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCamelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case__ ) ):
lowerCamelCase = observations_space[o]
lowerCamelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCamelCase = """"""
lowerCamelCase = -1
for k_state in states_space:
lowerCamelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCamelCase = probability
lowerCamelCase = k_state
# Update probabilities and pointers dicts
lowerCamelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCamelCase = arg_max
# The final observation
lowerCamelCase = observations_space[len(snake_case__ ) - 1]
# argmax for given final observation
lowerCamelCase = """"""
lowerCamelCase = -1
for k_state in states_space:
lowerCamelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCamelCase = probability
lowerCamelCase = k_state
lowerCamelCase = arg_max
# Process pointers backwards
lowerCamelCase = last_state
lowerCamelCase = []
for o in range(len(snake_case__ ) - 1 , -1 , -1 ):
result.append(snake_case__ )
lowerCamelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def _validation(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Validate every Viterbi input; raise ValueError on the first problem.

    Fixes: the original defined every helper under the single name ``a__``
    (each definition shadowing the last) with duplicated ``snake_case__``
    parameters (a SyntaxError), while the bodies — and the Viterbi function
    above — call ``_validation``/``_validate_*``. The names used by the call
    sites are restored.
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Reject any falsy (empty) parameter."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space, states_space) -> None:
    """Both spaces must be lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object, var_name) -> None:
    """``_object`` must be a list whose items are all strings."""
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Initial probabilities are str->float; the other two are nested dicts."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object, var_name) -> None:
    """``_object`` must be a str->dict mapping whose inner values are floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object, var_name, value_type, nested=False) -> None:
    """``_object`` must be a dict with string keys and ``value_type`` values."""
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_a = logging.get_logger(__name__)
class __A ( UpperCAmelCase__ ):
    """Processor wrapping a tokenizer plus optional speaker-embedding presets
    (Bark-style voice presets loaded from the Hub or a local directory).

    NOTE(review): method signatures declare ``__lowerCAmelCase`` repeatedly
    while bodies read ``tokenizer``/``speaker_embeddings``/``kwargs`` and the
    unbound ``_a`` — names mangled by obfuscation; confirm against the
    original source.
    """

    lowerCAmelCase_ = """AutoTokenizer"""
    lowerCAmelCase_ = ["""tokenizer"""]
    # Expected ndarray rank for each voice-preset component.
    lowerCAmelCase_ = {
        """semantic_prompt""": 1,
        """coarse_prompt""": 2,
        """fine_prompt""": 2,
    }

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase=None ):
        """Store the tokenizer and the (optional) speaker-embeddings dict."""
        super().__init__(_a )
        lowerCamelCase__ = speaker_embeddings

    @classmethod
    def __lowerCamelCase ( cls , __lowerCAmelCase , __lowerCAmelCase="speaker_embeddings_path.json" , **__lowerCAmelCase ):
        """Load a processor from the Hub, fetching the speaker-embeddings JSON if present."""
        if speaker_embeddings_dict_path is not None:
            lowerCamelCase__ = get_file_from_repo(
                _a , _a , subfolder=kwargs.pop('''subfolder''' , _a ) , cache_dir=kwargs.pop('''cache_dir''' , _a ) , force_download=kwargs.pop('''force_download''' , _a ) , proxies=kwargs.pop('''proxies''' , _a ) , resume_download=kwargs.pop('''resume_download''' , _a ) , local_files_only=kwargs.pop('''local_files_only''' , _a ) , use_auth_token=kwargs.pop('''use_auth_token''' , _a ) , revision=kwargs.pop('''revision''' , _a ) , )
            if speaker_embeddings_path is None:
                # Missing JSON is not fatal — fall back to no presets.
                logger.warning(
                    F'`{os.path.join(_a , _a )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
                lowerCamelCase__ = None
            else:
                with open(_a ) as speaker_embeddings_json:
                    lowerCamelCase__ = json.load(_a )
        else:
            lowerCamelCase__ = None
        lowerCamelCase__ = AutoTokenizer.from_pretrained(_a , **_a )
        return cls(tokenizer=_a , speaker_embeddings=_a )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase="speaker_embeddings_path.json" , __lowerCAmelCase="speaker_embeddings" , __lowerCAmelCase = False , **__lowerCAmelCase , ):
        """Save the processor; serialize each voice preset as .npy files plus an index JSON."""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(_a , _a , '''v2''' ) , exist_ok=_a )
            lowerCamelCase__ = {}
            lowerCamelCase__ = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    lowerCamelCase__ = self._load_voice_preset(_a )
                    lowerCamelCase__ = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        # One .npy per component (semantic / coarse / fine prompt).
                        np.save(
                            os.path.join(
                                embeddings_dict['''repo_or_path'''] , _a , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=_a , )
                        lowerCamelCase__ = os.path.join(_a , F'{prompt_key}_{key}.npy' )
                    lowerCamelCase__ = tmp_dict
            with open(os.path.join(_a , _a ) , '''w''' ) as fp:
                json.dump(_a , _a )
        super().save_pretrained(_a , _a , **_a )

    def __lowerCamelCase ( self , __lowerCAmelCase = None , **__lowerCAmelCase ):
        """Resolve and load the three .npy components of a named voice preset."""
        lowerCamelCase__ = self.speaker_embeddings[voice_preset]
        lowerCamelCase__ = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
            lowerCamelCase__ = get_file_from_repo(
                self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , _a ) , cache_dir=kwargs.pop('''cache_dir''' , _a ) , force_download=kwargs.pop('''force_download''' , _a ) , proxies=kwargs.pop('''proxies''' , _a ) , resume_download=kwargs.pop('''resume_download''' , _a ) , local_files_only=kwargs.pop('''local_files_only''' , _a ) , use_auth_token=kwargs.pop('''use_auth_token''' , _a ) , revision=kwargs.pop('''revision''' , _a ) , )
            if path is None:
                raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
            lowerCamelCase__ = np.load(_a )
        return voice_preset_dict

    def __lowerCamelCase ( self , __lowerCAmelCase = None ):
        """Check a voice-preset dict has the three components with expected ranks."""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )

    def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="pt" , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=False , **__lowerCAmelCase , ):
        """Tokenize text and attach a (named, dict, or .npz-file) voice preset."""
        if voice_preset is not None and not isinstance(_a , _a ):
            if (
                isinstance(_a , _a )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                # Known preset name — load from the speaker-embeddings index.
                lowerCamelCase__ = self._load_voice_preset(_a )
            else:
                # Otherwise treat it as a path to an .npz archive.
                if isinstance(_a , _a ) and not voice_preset.endswith('''.npz''' ):
                    lowerCamelCase__ = voice_preset + '''.npz'''
                lowerCamelCase__ = np.load(_a )
        if voice_preset is not None:
            self._validate_voice_preset_dict(_a , **_a )
            lowerCamelCase__ = BatchFeature(data=_a , tensor_type=_a )
        lowerCamelCase__ = self.tokenizer(
            _a , return_tensors=_a , padding='''max_length''' , max_length=_a , return_attention_mask=_a , return_token_type_ids=_a , add_special_tokens=_a , **_a , )
        if voice_preset is not None:
            lowerCamelCase__ = voice_preset
        return encoded_text
| 209 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Dict = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path) -> dict:
    """Load a metaseq/fairseq OPT checkpoint and normalise it for HF OPTModel.

    Fixes: the original assigned every value to the literal name
    ``lowerCamelCase`` while reads used ``sd``/``keys_to_delete`` etc.
    (NameError), was named ``a__`` although the conversion entry point calls
    ``load_checkpoint``, and loaded the checkpoint from disk twice.

    Args:
        checkpoint_path: path to the serialized fairseq checkpoint.

    Returns:
        A state dict with fairseq-only keys dropped, projection/norm keys
        renamed, and each fused ``qkv_proj`` split into q/k/v projections.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        # Some checkpoints nest the weights under a "model" key.
        sd = sd["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None) -> None:
    """Convert a metaseq OPT checkpoint into a saved HF ``OPTModel``.

    Fixes: the original declared two parameters both named ``snake_case__``
    (a SyntaxError), passed the checkpoint *path* to ``load_state_dict``
    instead of the loaded state dict, bound ``exist_ok`` to the path, and was
    named ``a__`` although the ``__main__`` block calls
    ``convert_opt_checkpoint``.

    Args:
        checkpoint_path: path to the fairseq checkpoint to convert.
        pytorch_dump_folder_path: output directory for the HF model.
        config: optional path/identifier of an ``OPTConfig`` to use; a default
            config is used when omitted.
    """
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI: convert a fairseq/metaseq OPT checkpoint into HF format.
    lowerCAmelCase : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--fairseq_path""",
        type=str,
        help=(
            """path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
            """ https://huggingface.co/models?other=opt_metasq"""
        ),
    )
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    lowerCAmelCase : Optional[Any] = parser.parse_args()
    # NOTE(review): `parser`/`args` are read but the obfuscated assignments
    # bind `lowerCAmelCase` — confirm against the original source.
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 291 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_UpperCAmelCase : int = None
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
# File names expected inside a checkpoint directory.
_UpperCAmelCase : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
# Hub URLs of the pretrained sentencepiece / fast-tokenizer files.
_UpperCAmelCase : Union[str, Any] = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}
# No fixed positional-embedding size limit for either checkpoint.
_UpperCAmelCase : Optional[int] = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
# SentencePiece underline marker.
_UpperCAmelCase : Union[str, Any] = """▁"""
# Segments (not really needed)
_UpperCAmelCase : str = 0
_UpperCAmelCase : Optional[int] = 1
_UpperCAmelCase : Tuple = 2
_UpperCAmelCase : Optional[Any] = 3
_UpperCAmelCase : List[Any] = 4
class lowercase ( UpperCAmelCase__ ):
    """Fast (tokenizers-backed) XLNet tokenizer with left-padding.

    NOTE(review): method bodies read ``token_ids_a``/``save_directory`` etc.
    and the unbound ``_a`` while the signatures declare ``A_`` — names
    mangled by obfuscation; confirm against the original source.
    """

    __lowercase : Any = VOCAB_FILES_NAMES
    __lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP
    __lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads on the left.
    __lowercase : int = "left"
    __lowercase : str = XLNetTokenizer

    def __init__( self , A_=None , A_=None , A_=False , A_=True , A_=False , A_="<s>" , A_="</s>" , A_="<unk>" , A_="<sep>" , A_="<pad>" , A_="<cls>" , A_="<mask>" , A_=["<eop>", "<eod>"] , **A_ , ) -> int:
        """Wire special tokens / normalisation flags into the fast tokenizer."""
        # The mask token behaves like a normal word preceded by a space.
        UpperCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
        super().__init__(
            vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , )
        UpperCamelCase = 3
        UpperCamelCase = do_lower_case
        UpperCamelCase = remove_space
        UpperCamelCase = keep_accents
        UpperCamelCase = vocab_file
        UpperCamelCase = False if not self.vocab_file else True

    def __UpperCamelCase ( self , A_ , A_ = None ) -> List[str]:
        """Build inputs with XLNet's layout: ``A <sep> [B <sep>] <cls>``."""
        UpperCamelCase = [self.sep_token_id]
        UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple:
        """Token-type ids: 0 for the first segment, 1 for the second, 2 for <cls>."""
        UpperCamelCase = [self.sep_token_id]
        UpperCamelCase = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def __UpperCamelCase ( self , A_ , A_ = None ) -> Union[str, Any]:
        """Copy the slow sentencepiece model into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(_a ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        UpperCamelCase = os.path.join(
            _a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
            copyfile(self.vocab_file , _a )
        return (out_vocab_file,)
| 222 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    """Tests that VisionTextDualEncoderProcessor correctly wraps and round-trips
    its tokenizer and image processor."""

    def setUp(self):
        # Throwaway directory with a minimal BERT vocab and a ViT image-processor config.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 291 | 0 |
"""simple docstring"""
from math import sqrt
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
__A = 0
for i in range(1 , int(sqrt(snake_case__ ) + 1 ) ):
if n % i == 0 and i != sqrt(snake_case__ ):
total += i + n // i
elif i == sqrt(snake_case__ ):
total += i
return total - n
def lowerCAmelCase ( __UpperCamelCase = 1_0_0_0_0 ):
"""simple docstring"""
__A = sum(
i
for i in range(1 , snake_case__ )
if sum_of_divisors(sum_of_divisors(snake_case__ ) ) == i and sum_of_divisors(snake_case__ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 266 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus everything destined for the training script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    """Import the training script as a module and spawn it on the requested TPU cores."""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus --tpu_num_cores.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 291 | 0 |
# Kosaraju's algorithm for strongly connected components.
# Graphs are adjacency lists keyed by vertex 0..n-1.
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph, vert, visited) -> list[int]:
    """DFS from *vert*, returning vertices in order of finishing time (post-order)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph, vert, visited) -> list[int]:
    """Collect every vertex reachable from *vert* in the reversed graph (one SCC)."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def kosaraju(graph) -> list[list[int]]:
    """Return the strongly connected components of *graph* as lists of vertices."""
    visited = len(graph) * [False]

    # Build the transpose graph (all edges reversed).
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    # First pass: record finishing order on the original graph.
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    # Second pass: DFS the transpose in reverse finishing order; each tree is an SCC.
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 339 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint names mapped to their hosted config files.
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    """Configuration class for SEW-D models.

    Holds the architecture hyper-parameters (encoder sizes, convolutional feature
    extractor layout, disentangled-attention settings) plus SpecAugment and CTC
    fine-tuning options. All arguments default to the `asapp/sew-d-tiny-100k`
    architecture.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv-layout lists must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Total temporal downsampling factor of the convolutional feature extractor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 291 | 0 |
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join(chr(ord(snake_case__ ) - 32 ) if '''a''' <= char <= '''z''' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 110 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase : Any = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowerCAmelCase : Any = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
lowerCAmelCase : Any = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    """Recall metric backed by `sklearn.metrics.recall_score`."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        # sklearn expects (y_true, y_pred) -> (references, predictions).
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        # Scalar scores become plain floats; per-class scores stay as an array.
        return {"recall": float(score) if score.size == 1 else score}
| 291 | 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Official dance-diffusion checkpoints: download URL plus audio geometry.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 131_072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Map (alpha, sigma) noise coefficients to a diffusion time in [0, 1]."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """Convert a linear time schedule *t* to the 'crash' schedule used by dance-diffusion."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    """Bare attribute container used to mimic the original training config object."""

    pass


class DiffusionUncond(nn.Module):
    """Wrapper reproducing the original repo's unconditional diffusion module layout."""

    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        # EMA copy of the weights is what the released checkpoints store.
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    """Download the named official checkpoint into the CWD and return its local path.

    NOTE(review): shells out to wget; URLs come from the trusted MODELS_MAP constant.
    """
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
# Layer-number -> diffusers sub-layer name maps used by rename() below.
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

# Sub-module renames inside a ResConvBlock.
RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

# Attention renames; qkv is split into three separate projections in diffusers.
ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    """Translate a ResConvBlock parameter name to its diffusers equivalent."""
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    """Translate an attention parameter name; qkv names expand to a list of three."""
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    """Map an original dance-diffusion parameter name to its diffusers name.

    Returns a string, or a list of strings when a fused qkv projection must be
    split into separate query/key/value parameters.
    """
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    # Count how deep in the U-Net this parameter sits by peeling nested prefixes.
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    """Build a diffusers-style state dict from an original checkpoint's state dict."""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, name, v):
    """Store attention tensor *v* under *name*, splitting fused qkv weights.

    A 1-element *name* is a plain projection; a 3-element name means *v* stacks
    query/key/value along dim 0 and is split into thirds. Conv weights (rank 3)
    are squeezed to Linear weights by dropping the trailing kernel dim.
    """
    if len(name) == 1:
        if len(v.shape) == 3:
            # weight: Conv1d (out, in, 1) -> Linear (out, in)
            new_state_dict[name[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[name[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    """Convert a dance-diffusion checkpoint to diffusers format and verify parity.

    Loads (or downloads) the original checkpoint, renames its weights into a
    diffusers UNet, then compares a generation from the converted pipeline
    against the original sampler before optionally saving.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    # Sanity-check the key mapping: only (weightless) kernel keys may be missing.
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    # Reference generation with the original sampler for parity checking.
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
UpperCAmelCase_ = parser.parse_args()
main(args)
| 12 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """Map-style dataset that applies a preprocessing function to each item on access."""

    def __init__(self, dataset, process, params):
        # dataset: any indexable collection; process: callable(item, **params).
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    """
    Wraps an iterable loader and applies `infer` to every item, optionally
    un-rolling batched model outputs back into single items (batch_size=1
    views) so downstream pipeline code can treat everything as unbatched.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        """
        Args:
            loader: iterable (usually a DataLoader) yielding raw items or batches.
            infer: callable applied to every item as ``infer(item, **params)``.
            params: dict of keyword arguments forwarded to `infer`.
            loader_batch_size: if set, `infer` outputs are assumed batched and
                are unrolled item by item.
        """
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping for the batch currently being unrolled
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the current item (shaped like a batch of size 1) from the stored batch."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    """
    Iterator for chunk pipelines: `infer` returns an *iterator* per input item,
    and this class flattens those sub-iterators into one continuous stream.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        # Chunk iterators never unroll batches themselves, so the batch size
        # is intentionally not forwarded to the parent.
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        # No sub-iterator started yet.
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Start unrolling the first item.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    """
    Regroups the flattened items produced by a chunk pipeline back into lists,
    using each item's `is_last` marker to recover the original `process`
    boundaries so that `process` and `postprocess` see the same data.
    """

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    """Dataset view that extracts a single key from each (dict-like) item of an inner dataset."""

    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
    """Dataset view yielding `{"text": item[key1], "text_pair": item[key2]}` for sequence-pair tasks."""

    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 291 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """Fast custom tokenizer paired with `CustomTokenizer` as its slow implementation."""

    # Hook read by AutoTokenizer/save_pretrained to locate the matching slow tokenizer class.
    slow_tokenizer_class = CustomTokenizer
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    """Return True if *n* is a pentagonal number, i.e. n = m(3m - 1)/2 for some integer m."""
    # Invert the pentagonal formula: m = (1 + sqrt(1 + 24n)) / 6 must be an integer.
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find the difference of the first pair of pentagonal
    numbers (among the first ``limit - 1``) whose sum and difference are both
    pentagonal. Returns -1 if no such pair exists within the limit.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 291 | 0 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way `target` can be concatenated from elements of `word_bank`
    (elements may be reused). An empty target yields ``[[]]``.
    """
    word_bank = word_bank or []
    # create a table: table[i] holds all ways to build target[:i]
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # only positions that are reachable can be extended
        if table[i] != []:
            for word in word_bank:
                # slice condition: the word must match the target at this position
                if target[i : i + len(word)] == word:
                    # adds the word to every combination the current position holds
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # now, push those combinations to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are built in reverse order, so reverse for readable output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
    # Demo: print every way each target string can be assembled from its word bank.
    print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
    print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
    print(
        all_construct(
            """hexagonosaurus""",
            ["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
        )
    )
| 317 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
# Module logger used by the flax->pytorch conversion helpers below.
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Deserialize the Flax checkpoint at `model_file` and load it into `pt_model`.

    Raises OSError for git-lfs pointer files and EnvironmentError when the file
    cannot be deserialized at all.
    """
    try:
        with open(model_file, "rb") as flax_state_f:
            # NOTE(review): the first `from_bytes` argument is the target pytree
            # template; `None` deserializes into raw nested dicts — confirm
            # against the flax version in use.
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                # A text file starting with "version" is a git-lfs pointer, not a checkpoint.
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ')

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load a (nested) tree of Flax weights into the PyTorch model `pt_model` and return it."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv kernel: Flax (H, W, In, Out) -> PyTorch (Out, In, H, W)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # linear kernel: transpose into the PyTorch convention
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            # (layer/group) norm "scale" maps to PyTorch "weight"
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            # Flax uses `layers_0` style names; PyTorch ModuleLists use `layers.0`.
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.'
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            " use it for predictions and inference."
        )

    return pt_model
| 291 | 0 |
def dodecahedron_surface_area(edge) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length.

    Accepts int or float edges (a generalization over int-only input).

    Raises:
        ValueError: if `edge` is not a positive number.
    """
    # Type check first so non-numeric inputs raise ValueError, not TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive.')
    # A = 3 * sqrt(25 + 10*sqrt(5)) * a^2
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge) -> float:
    """Return the volume of a regular dodecahedron with the given edge length.

    Accepts int or float edges (a generalization over int-only input).

    Raises:
        ValueError: if `edge` is not a positive number.
    """
    # Type check first so non-numeric inputs raise ValueError, not TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive.')
    # V = (15 + 7*sqrt(5)) / 4 * a^3
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
    # Run any doctest examples defined in this module when executed directly.
    import doctest
    doctest.testmod()
| 7 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # Slow tokenizer unavailable without sentencepiece.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}

SPIECE_UNDERLINE = """▁"""

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = "left"
__UpperCamelCase = XLNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , )
lowerCamelCase = 3
lowerCamelCase = do_lower_case
lowerCamelCase = remove_space
lowerCamelCase = keep_accents
lowerCamelCase = vocab_file
lowerCamelCase = False if not self.vocab_file else True
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 291 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowercase__ : Optional[int] = 2
class Dictionary:
    """A fairseq-style mapping from symbols to consecutive integer ids."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []  # id -> symbol
        self.count = []  # id -> occurrence count
        self.indices = {}  # symbol -> id
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    # NOTE: defining __eq__ without __hash__ makes instances unhashable (inherited behavior).
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range ids map to the unk token instead of raising.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Return the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load a dictionary from a `<symbol> <count>` text file (path or file object)."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary (or bump its count) and return its id."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # Plain-text dictionaries carry no metadata header; data starts at line 0.
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, '''r''', encoding='''utf-8''') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(''' ''', 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(''' ''', 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        '''Duplicate word found when loading Dictionary: \'{}\'. '''
                        '''Duplicate words can overwrite earlier ones by adding the '''
                        '''#fairseq:overwrite flag at the end of the corresponding row '''
                        '''in the dictionary file. If using the Camembert model, please '''
                        '''download an updated copy of the model file.'''.format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''')
def rewrite_dict_keys(d):
    """Rewrite fairseq BPE vocab keys for the HF format.

    (1) remove the word-breaking symbol `@@` from key suffixes, and (2) append a
    word-ending symbol `</w>` where the word is not broken up, e.g.:
    ``{'le@@': 5, 'tt@@': 6, 'er': 7}`` => ``{'le': 5, 'tt': 6, 'er</w>': 7}``.
    The four special tokens (<s> <pad> </s> <unk>) are restored verbatim.
    """
    d2 = dict((re.sub(r'''@@$''', '''''', k), v) if k.endswith('''@@''') else (re.sub(r'''$''', '''</w>''', k), v) for k, v in d.items())
    keep_keys = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[F"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT checkpoint directory into a HF Transformers model directory.

    Args:
        biogpt_checkpoint_path: directory holding ``checkpoint.pt``, ``dict.txt`` and ``bpecodes``.
        pytorch_dump_folder_path: output directory for vocab, merges, configs and weights.

    Raises:
        ValueError: if any of the expected input files is missing.
    """
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"""Writing results to {pytorch_dump_folder_path}""")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, '''checkpoint.pt''')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"""path to the file {checkpoint_file} does not exist!""")
    chkpt = torch.load(checkpoint_file, map_location='''cpu''')

    args = chkpt['''cfg''']['''model''']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, '''dict.txt''')
    if not os.path.isfile(dict_file):
        raise ValueError(f"""path to the file {dict_file} does not exist!""")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['''vocab_file'''])
    print(f"""Generating {src_vocab_file} of {src_vocab_size} records""")
    with open(src_vocab_file, '''w''', encoding='''utf-8''') as f:
        # indent=2 matches the module-level json indentation constant
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, '''bpecodes''')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"""path to the file {bpecodes_file} does not exist!""")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['''merges_file'''])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, '''config.json''')
    model_conf = {
        '''activation_dropout''': args['''activation_dropout'''],
        '''architectures''': ['''BioGptForCausalLM'''],
        '''attention_probs_dropout_prob''': args['''attention_dropout'''],
        '''bos_token_id''': 0,
        '''eos_token_id''': 2,
        '''hidden_act''': args['''activation_fn'''],
        '''hidden_dropout_prob''': args['''dropout'''],
        '''hidden_size''': args['''decoder_embed_dim'''],
        '''initializer_range''': 0.02,
        '''intermediate_size''': args['''decoder_ffn_embed_dim'''],
        '''layer_norm_eps''': 1e-12,
        '''layerdrop''': args['''decoder_layerdrop'''],
        '''max_position_embeddings''': args['''max_target_positions'''],
        '''model_type''': '''biogpt''',
        '''num_attention_heads''': args['''decoder_attention_heads'''],
        '''num_hidden_layers''': args['''decoder_layers'''],
        '''pad_token_id''': 1,
        '''scale_embedding''': not args['''no_scale_embedding'''],
        '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
        '''vocab_size''': src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"""Generating {biogpt_model_config_file}""")
    with open(biogpt_model_config_file, '''w''', encoding='''utf-8''') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        '''bos_token''': '''<s>''',
        '''eos_token''': '''</s>''',
        '''model_max_length''': 1024,
        '''pad_token''': '''<pad>''',
        '''special_tokens_map_file''': None,
        '''tokenizer_class''': '''BioGptTokenizer''',
        '''unk_token''': '''<unk>''',
    }

    print(f"""Generating {biogpt_tokenizer_config_file}""")
    with open(biogpt_tokenizer_config_file, '''w''', encoding='''utf-8''') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model_state_dict = chkpt['''model''']

    # remove unneeded keys
    ignore_keys = [
        '''decoder.version''',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    # rename fairseq "decoder.*" keys to the HF "biogpt.*" namespace
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith('''output_projection.weight'''):
            model_state_dict[layer_name.replace('''decoder.''', '''''')] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace('''decoder''', '''biogpt''')] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"""Generating {pytorch_weights_dump_path}""")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('''Conversion is done!''')
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--biogpt_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help=(
            'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
            ' bpecodes, etc.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 324 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests(unittest.TestCase):
    """Tests for the text2text-generation pipeline (encoder-decoder models)."""

    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("""Something there""")
        self.assertEqual(outputs, [{"""generated_text""": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there"""))

        outputs = generator(["""This is great !""", """Something else"""], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
            ],
        )

        outputs = generator(
            ["""This is great !""", """Something else"""], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
            ],
        )

        with self.assertRaises(TypeError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("""text2text-generation""", model="""patrickvonplaten/t5-tiny-random""", framework="""pt""")
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""", do_sample=False)
        self.assertEqual(outputs, [{"""generated_text""": """"""}])

        num_return_sequences = 3
        outputs = generator(
            """Something there""",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("""This is a test""", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"""generated_token_ids""": ANY(torch.Tensor)},
                {"""generated_token_ids""": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = """<pad>"""

        outputs = generator(
            ["""This is a test""", """This is a second test"""],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("""text2text-generation""", model="""patrickvonplaten/t5-tiny-random""", framework="""tf""")
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""", do_sample=False)
        self.assertEqual(outputs, [{"""generated_text""": """"""}])
| 291 | 0 |
'''simple docstring'''
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed=None) -> str:
    """Simulate the BB84 quantum key distribution protocol.

    Args:
        key_len: Desired key length in bits.
        seed: Optional seed for both numpy's RNG and the qiskit simulator,
            making the run reproducible. The demo caller invokes this
            function as ``bbaa(8, seed=0)``.

    Returns:
        A string of '0'/'1' characters of length ``key_len``.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
# Script entry point: print a demo key and run this module's doctests.
if __name__ == "__main__":
    # NOTE(review): `bbaa` is the BB84 key-generation function defined above.
    print(f'''The generated key is : {bbaa(8, seed=0)}''')
    from doctest import testmod
    testmod()
| 158 |
"""simple docstring"""
def a__(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase).

    Args:
        input_str: The snake_case string to convert.
        use_pascal: When True, capitalize the first word as well (PascalCase).

    Returns:
        The converted string.

    Raises:
        ValueError: If ``input_str`` is not a string or ``use_pascal`` is not a bool.
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    # In camelCase the first word stays lowercase; in PascalCase it is capitalized too.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 291 | 0 |
from __future__ import annotations
def lowerCAmelCase__(__snake_case: int) -> list[int]:
    """Return the prime factorization of ``__snake_case`` by trial division.

    Factors are returned in non-decreasing order, with multiplicity,
    e.g. 100 -> [2, 2, 5, 5]. For n <= 1 the result is an empty list.
    """
    n = __snake_case
    i = 2
    factors = []
    # Trial division: only candidates up to sqrt(n) need to be tested.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    # Whatever is left (> 1) is itself prime.
    if n > 1:
        factors.append(n)
    return factors
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 209 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCAmelCase : int = logging.get_logger(__name__)
class __magic_name__(BaseImageProcessor):
    r"""
    Image processor that resizes (shortest edge), center-crops, rescales and
    normalizes images, and can post-process semantic-segmentation logits.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults: shortest edge 256 for resize, 224x224 center crop.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop the image to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize the image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured transforms to one image or a batch of images.

        Per-call arguments override the defaults stored on the processor.
        Returns a `BatchFeature` with key ``pixel_values``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        """Convert model outputs into per-image semantic segmentation maps.

        When ``target_sizes`` is given, each logits map is bilinearly resized
        to the corresponding (height, width) before taking the argmax.
        """
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 291 | 0 |
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n-choose-k, the number of ways to pick k items from n.

    Named ``combinations`` to match the demo calls below.

    Raises:
        ValueError: If k is negative or larger than n.
    """
    if n < k or k < 0:
        raise ValueError('Please enter positive integers for n and k where n >= k')
    return factorial(n) // (factorial(k) * factorial(n - k))
# Demo: print a few classic n-choose-k examples when run as a script.
if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        F'''fifty-two card deck is: {combinations(52, 5)}\n''',
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        F'''4 for group projects, there are {combinations(40, 4)} ways''',
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        F'''are {combinations(10, 3)} ways that first, second and''',
        "third place can be awarded.",
    )
| 222 |
"""simple docstring"""
import operator as op
lowerCAmelCase : Dict = """scaler.pt"""
lowerCAmelCase : Tuple = """pytorch_model"""
lowerCAmelCase : Union[str, Any] = """random_states"""
lowerCAmelCase : Union[str, Any] = """optimizer"""
lowerCAmelCase : Dict = """scheduler"""
lowerCAmelCase : int = """pytorch_model.bin"""
lowerCAmelCase : str = """pytorch_model.bin.index.json"""
lowerCAmelCase : Union[str, Any] = """model.safetensors"""
lowerCAmelCase : List[Any] = """model.safetensors.index.json"""
lowerCAmelCase : List[Any] = """1.10.2"""
lowerCAmelCase : Any = """py38"""
lowerCAmelCase : Optional[int] = """4.17.0"""
lowerCAmelCase : str = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
lowerCAmelCase : Tuple = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
lowerCAmelCase : List[Any] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
lowerCAmelCase : List[str] = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
lowerCAmelCase : List[str] = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
lowerCAmelCase : Any = """2.0.1"""
lowerCAmelCase : List[Any] = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
lowerCAmelCase : Union[str, Any] = ["""default""", """reduce-overhead""", """max-autotune"""]
lowerCAmelCase : Optional[int] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCAmelCase : Union[str, Any] = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
lowerCAmelCase : List[str] = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
lowerCAmelCase : Optional[Any] = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 291 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the configuration used by the LayoutLMv3 image-processing tests.

    Named ``LayoutLMvaImageProcessingTester`` because the test class below
    instantiates it under that name in ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for the LayoutLMv3 image processor (resizing + OCR via pytesseract)."""

    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''apply_ocr'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42})

    def test_batch_feature(self):
        # Batched behaviour is covered by the __call__ tests below.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='''pt''')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )
        # apply_ocr=True also returns recognized words and their boxes.
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''')
        image = Image.open(ds[0]['''file''']).convert('''RGB''')
        encoding = image_processing(image, return_tensors='''pt''')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']]  # noqa: E231
        expected_boxes = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]]  # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='''pt''')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24))
| 266 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """Builds tiny ViTMSN configs/inputs for the model tests below.

    Named ``ViTMSNModelTester`` because the test class instantiates it
    under that name in ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ViTMSN (no attention mask / no token embeddings)."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration test below.

    Named ``prepare_img`` because the integration test calls it under that name.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained facebook/vit-msn-small checkpoint."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        # Seed fixed for reproducibility of the expected logits below.
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0_803, -0.4_454, -0.2_375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 291 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import table: submodule name -> public names it defines.
# Fixes: the modeling list previously *overwrote* this dict, and the
# `_LazyModule` call referenced an undefined `_import_structure` and was
# assigned to a throwaway name instead of replacing the module in sys.modules.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 339 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ) -> List[Any]:
lowerCamelCase = lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
lowerCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
lowerCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
lowerCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
lowerCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ) -> List[str]:
if split_mlp_wi:
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
lowerCamelCase = (wi_a, wi_a)
else:
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple:
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def a__ ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False ) -> Dict:
    """Convert a flattened T5X parameter tree into a PyTorch-style state mapping.

    NOTE(review): this block is badly mangled and cannot run as-is:
    - the signature repeats ``snake_case__`` four times (a SyntaxError);
    - the dict-key targets of most assignments were collapsed to bare
      ``lowerCamelCase =``, so the converted mapping is never populated;
    - ``variables``, ``old``, ``split_mlp_wi``, ``is_encoder_only``,
      ``scalable_attention``, ``layer_norm``, ``k``/``o``/``q``/``v``,
      ``wi``/``wo`` and ``new`` are unbound names;
    - the helper calls (``tax_layer_norm_lookup`` etc.) target names that the
      same mangling renamed to ``a__``.
    Restoring it requires the upstream T5X->PyTorch key mapping, so the code is
    left byte-identical and only annotated.
    """
    lowerCamelCase = traverse_util.flatten_dict(variables["""target"""] )
    # NOTE(review): the join argument was presumably ``k``, not the parameter — TODO confirm.
    lowerCamelCase = {"""/""".join(snake_case__ ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    lowerCamelCase = """encoder/encoder/mlp/wi_0/kernel""" in old
    print("""Split MLP:""" , snake_case__ )
    lowerCamelCase = collections.OrderedDict()
    # Shared embeddings.
    lowerCamelCase = old["""token_embedder/embedding"""]
    # Encoder.
    for i in range(snake_case__ ):
        # Block i, layer 0 (Self Attention).
        lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_attention_layer_norm""" )
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """encoder""" , """attention""" )
        lowerCamelCase = layer_norm
        lowerCamelCase = k.T
        lowerCamelCase = o.T
        lowerCamelCase = q.T
        lowerCamelCase = v.T
        # Block i, layer 1 (MLP).
        lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_mlp_layer_norm""" )
        lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """encoder""" , snake_case__ )
        lowerCamelCase = layer_norm
        if split_mlp_wi:
            lowerCamelCase = wi[0].T
            lowerCamelCase = wi[1].T
        else:
            lowerCamelCase = wi.T
        lowerCamelCase = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            lowerCamelCase = tax_relpos_bias_lookup(
                snake_case__ , snake_case__ , """encoder""" ).T
    lowerCamelCase = old["""encoder/encoder_norm/scale"""]
    if not scalable_attention:
        lowerCamelCase = tax_relpos_bias_lookup(
            snake_case__ , 0 , """encoder""" ).T
        lowerCamelCase = tax_relpos_bias_lookup(
            snake_case__ , 0 , """decoder""" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(snake_case__ ):
            # Block i, layer 0 (Self Attention).
            lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_self_attention_layer_norm""" )
            lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """self_attention""" )
            lowerCamelCase = layer_norm
            lowerCamelCase = k.T
            lowerCamelCase = o.T
            lowerCamelCase = q.T
            lowerCamelCase = v.T
            # Block i, layer 1 (Cross Attention).
            lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_cross_attention_layer_norm""" )
            lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """encoder_decoder_attention""" )
            lowerCamelCase = layer_norm
            lowerCamelCase = k.T
            lowerCamelCase = o.T
            lowerCamelCase = q.T
            lowerCamelCase = v.T
            # Block i, layer 2 (MLP).
            lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_mlp_layer_norm""" )
            lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """decoder""" , snake_case__ )
            lowerCamelCase = layer_norm
            if split_mlp_wi:
                lowerCamelCase = wi[0].T
                lowerCamelCase = wi[1].T
            else:
                lowerCamelCase = wi.T
            lowerCamelCase = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                lowerCamelCase = tax_relpos_bias_lookup(snake_case__ , snake_case__ , """decoder""" ).T
    lowerCamelCase = old["""decoder/decoder_norm/scale"""]
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        lowerCamelCase = old["""decoder/logits_dense/kernel"""].T
    return new
def a__ ( snake_case__ , snake_case__ ) -> Optional[int]:
lowerCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCamelCase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCamelCase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowerCamelCase = state_dict["""shared.weight"""]
return state_dict
def a__ ( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ) -> List[Any]:
    """Load a T5X checkpoint, convert it, and copy the weights into ``model``.

    Fix: the mangled signature repeated one parameter name five times
    (SyntaxError); names are recovered from the keyword call site below.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    # NOTE(review): `convert_tax_to_pytorch` / `make_state_dict` are defined above
    # but were renamed to `a__` by the mangling; these call targets are unresolved as-is.
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    # NOTE(review): the strict flag was mangled away; the upstream converter
    # loads with strict=True — TODO confirm.
    model.load_state_dict(state_dict , strict=True )
def a__ ( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False , scalable_attention = False , ) -> str:
    """Convert a T5X checkpoint into a PyTorch checkpoint on disk.

    Fix: the mangled signature repeated one parameter name five times
    (SyntaxError); names are recovered from the argparse options below.
    """
    config = MTaConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    # NOTE(review): `load_tax_weights_in_ta` was renamed to `a__` by the mangling;
    # the call target is unresolved as-is.
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("""Done""" )
if __name__ == "__main__":
    # Fixes: the parser/args were bound to throwaway names while the code below
    # referenced `parser`/`args`, and `args.tax_checkpoint_path` did not match the
    # declared `--t5x_checkpoint_path` option.
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
    )
    parser.add_argument(
        """--scalable_attention""",
        action="""store_true""",
        help="""Whether the model uses scaled attention (umt5 model)""",
        default=False,
    )
    args = parser.parse_args()
    # NOTE(review): the converter entry point directly above was renamed to `a__`
    # by the mangling; call it under that name.
    a__(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 291 | 0 |
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = len(snake_case__ )
for i in range(length - 1 ):
lowercase__ = i
for k in range(i + 1 , snake_case__ ):
if collection[k] < collection[least]:
lowercase__ = k
if least != i:
lowercase__ , lowercase__ = (collection[i], collection[least])
return collection
if __name__ == "__main__":
    # Fixes: the inputs were bound to throwaway names while the code referenced
    # `user_input`/`unsorted`, and the sort was called under a name that no
    # longer exists (it was mangled to `_a`).
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(_a(unsorted))
| 110 |
"""simple docstring"""
from __future__ import annotations
def a__ ( a_list , item ) -> bool:
    """Return True iff ``item`` occurs in the sorted list ``a_list``.

    Fixes: duplicate parameter names (SyntaxError) and recursion through the
    no-longer-existing name ``binary_search``.
    """
    if len(a_list ) == 0:
        return False
    midpoint = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return a__(a_list[:midpoint] , item )
    else:
        return a__(a_list[midpoint + 1 :] , item )
if __name__ == "__main__":
    # Fixes: the locals were bound to throwaway names while `sequence`/`target`/
    # `not_str` were referenced, and the search was called under a name that no
    # longer exists (it was mangled to `a__`).
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    sequence = [int(item.strip()) for item in user_input.split(""",""")]
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    not_str = """""" if a__(sequence, target) else """not """
    print(F"""{target} was {not_str}found in {sequence}""")
| 291 | 0 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class lowerCamelCase__( UpperCAmelCase__):
    """Unit tests for ``TypedSequence`` -> pyarrow conversion.

    NOTE(review): the base class ``UpperCAmelCase__`` is an unresolved name left
    by the mangling (presumably the imported ``TestCase``), the ``_a`` passed to
    ``assertRaises``/``patch(side_effect=...)`` stood for exception types and a
    callable that cannot be recovered here, each method binds results to a
    throwaway ``__lowerCamelCase`` and then reads undefined ``arr``/``pil_image``/
    ``args``/``kwargs``, and ``pa.intaa``/``np.uinta`` are not real attributes
    (mangled int64/uint8). Left byte-identical and only annotated.
    """
    def lowerCAmelCase__ ( self: List[Any] ):
        __lowerCamelCase = pa.array(TypedSequence([1, 2, 3] ) )
        self.assertEqual(arr.type , pa.intaa() )
    def lowerCAmelCase__ ( self: List[str] ):
        with self.assertRaises(_a ):
            __lowerCamelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
    def lowerCAmelCase__ ( self: Optional[int] ):
        with self.assertRaises(_a ):
            __lowerCamelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
    def lowerCAmelCase__ ( self: List[Any] ):
        __lowerCamelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
        self.assertEqual(arr.type , pa.intaa() )
    def lowerCAmelCase__ ( self: Union[str, Any] ):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            __lowerCamelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
    def lowerCAmelCase__ ( self: Optional[Any] ):
        __lowerCamelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
        self.assertEqual(arr.type , pa.intaa() )
    def lowerCAmelCase__ ( self: List[str] ):
        __lowerCamelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
        self.assertEqual(arr.type , pa.string() )
    def lowerCAmelCase__ ( self: Optional[Any] ):
        __lowerCamelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
        self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
    def lowerCAmelCase__ ( self: Dict ):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            __lowerCamelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
    def lowerCAmelCase__ ( self: Optional[int] ):
        __lowerCamelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
        self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
    def lowerCAmelCase__ ( self: int ):
        __lowerCamelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
        self.assertEqual(arr.type , pa.string() )
    @require_pil
    def lowerCAmelCase__ ( self: int ):
        import PIL.Image
        __lowerCamelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
        with patch(
            """datasets.arrow_writer.cast_to_python_objects""" , side_effect=_a ) as mock_cast_to_python_objects:
            __lowerCamelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) )
            __lowerCamelCase, __lowerCamelCase = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("""optimize_list_casting""" , _a )
            self.assertFalse(kwargs["""optimize_list_casting"""] )
def lowerCamelCase__ ( output , expected_num_chunks ):
    """Read ``output`` back as an arrow stream and verify contents and chunking.

    Fixes: duplicate parameter names (SyntaxError) and body references to
    undefined names in place of the parameters.
    """
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def lowerCamelCase__ ( fields , writer_batch_size ):
    """write() streams examples and records the expected schema.

    Fixes: duplicate parameter names (SyntaxError); parameter names restored to
    match the parametrize ids. ``pa.intaa`` is a mangling artifact left as-is.
    """
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write({"""col_1""": """foo""", """col_2""": 1} )
        writer.write({"""col_1""": """bar""", """col_2""": 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    # NOTE(review): the output-checking helper above was mangled to `lowerCamelCase__`;
    # this call target is unresolved as-is.
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowerCamelCase__ ( ):
    """Round-trip: a Features schema is preserved through ArrowWriter.

    Fixes: results were bound to throwaway names while `features`/`output`/
    `schema` etc. were referenced; locals restored from the references.
    """
    output = pa.BufferOutputStream()
    features = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
    with ArrowWriter(stream=output , features=features ) as writer:
        writer.write({"""labels""": 0} )
        writer.write({"""labels""": 1} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue() )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def lowerCamelCase__ ( writer_batch_size ):
    """Writing with an unhashable (list) key must raise.

    Fixes: duplicate parameter name usage (mangling) restored to the parametrize
    id. NOTE(review): the expected exception was mangled away; ``InvalidKeyError``
    (imported above) is the one raised for unhashable keys — TODO confirm.
    """
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="""split_name""" , check_duplicates=True , ) as writer:
        with pytest.raises(InvalidKeyError ):
            writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
    num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def lowerCamelCase__ ( writer_batch_size ):
    """Writing the same key twice must raise.

    NOTE(review): the expected exception was mangled away; ``DuplicatedKeysError``
    (imported above) matches the duplicate-key scenario — TODO confirm.
    """
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="""split_name""" , check_duplicates=True , ) as writer:
        with pytest.raises(DuplicatedKeysError ):
            writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
            writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
    num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def lowerCamelCase__ ( writer_batch_size ):
    """Distinct keys write cleanly and produce the expected stream."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="""split_name""" , check_duplicates=True , ) as writer:
        writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
        writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    # NOTE(review): the output-checking helper above was mangled to `lowerCamelCase__`;
    # this call target is unresolved as-is.
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def lowerCamelCase__ ( fields , writer_batch_size ):
    """write_batch() accepts a dict of columns, including an empty batch."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
        writer.write_batch({"""col_1""": [], """col_2""": []} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"""col_1""": pa.string(), """col_2""": pa.intaa()}  # NOTE(review): pa.intaa is a mangling artifact (was int64)
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )  # NOTE(review): helper name mangled above
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def lowerCamelCase__ ( fields , writer_batch_size ):
    """write_table() accepts a full pyarrow table."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"""col_1""": pa.string(), """col_2""": pa.intaa()}  # NOTE(review): pa.intaa is a mangling artifact (was int64)
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )  # NOTE(review): helper name mangled above
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def lowerCamelCase__ ( fields , writer_batch_size ):
    """write_row() accepts single-row pyarrow tables."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
        writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"""col_1""": pa.string(), """col_2""": pa.intaa()}  # NOTE(review): pa.intaa is a mangling artifact (was int64)
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )  # NOTE(review): helper name mangled above
def lowerCamelCase__ ( ):
    """ArrowWriter writes a valid arrow file to disk."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"""col_1""": pa.string(), """col_2""": pa.intaa()}  # NOTE(review): pa.intaa is a mangling artifact (was int64)
        output = os.path.join(tmp_dir , """test.arrow""" )
        with ArrowWriter(path=output , schema=pa.schema(fields ) ) as writer:
            writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
        _check_output(output , 1 )  # NOTE(review): helper name mangled above
def lowerCamelCase__ ( arr_type ):
    """Return the innermost (non-list) arrow dtype of ``arr_type``.

    Fix: the body referenced an undefined name in place of the parameter.
    """
    if pa.types.is_list(arr_type ):
        # NOTE(review): intended as self-recursion — this function was originally
        # named `get_base_dtype` before the mangling; the call target is
        # unresolved as-is.
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def lowerCamelCase__ ( lst , value ):
    """Replace the first primitive (non-list) element of nested list ``lst``.

    Fixes: duplicate parameter names (SyntaxError) and a lost isinstance type
    argument (nested lists are the only recursive case here).
    """
    if isinstance(lst[0] , list ):
        # NOTE(review): intended as self-recursion — originally named
        # `change_first_primitive_element_in_list`; unresolved as-is.
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCamelCase__ ( sequence , optimized_int_type , expected_dtype ):
    """TypedSequence honours ``optimized_int_type``."""
    arr = pa.array(TypedSequence(sequence , optimized_int_type=optimized_int_type ) )
    # NOTE(review): `get_base_dtype` was mangled to `lowerCamelCase__` above; unresolved as-is.
    assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    """col, expected_dtype""" , [
        ("""attention_mask""", pa.inta()),
        ("""special_tokens_mask""", pa.inta()),
        ("""token_type_ids""", pa.inta()),
        ("""input_ids""", pa.intaa()),
        ("""other""", pa.intaa()),
    ] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCamelCase__ ( sequence , col , expected_dtype ):
    """OptimizedTypedSequence picks a compact dtype per column, upcasting on overflow.

    NOTE(review): `pa.inta`/`pa.intaa` are mangling artifacts (were int8/int64).
    """
    arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
    assert get_base_dtype(arr.type ) == expected_dtype  # NOTE(review): helper name mangled above
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence )
        value = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
        change_first_primitive_element_in_list(sequence , value )  # NOTE(review): helper name mangled above
        arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
        assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def lowerCamelCase__ ( tmp_path , raise_exception ):
    """The writer's underlying stream ends up closed on success and error paths."""
    path = str(tmp_path / """dataset-train.arrow""" )
    try:
        with ArrowWriter(path=path ) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def lowerCamelCase__ ( mockfs ):
    """ArrowWriter works against a mocked fsspec filesystem (``mockfs`` fixture)."""
    path = """mock://dataset-train.arrow"""
    with ArrowWriter(path=path , storage_options=mockfs.storage_options ) as writer:
        assert isinstance(writer._fs , type(mockfs ) )
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"""col_1""": """foo""", """col_2""": 1} )
        writer.write({"""col_1""": """bar""", """col_2""": 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path )
def lowerCamelCase__ ( ):
    """ParquetWriter produces a readable parquet buffer."""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output ) as writer:
        writer.write({"""col_1""": """foo""", """col_2""": 1} )
        writer.write({"""col_1""": """bar""", """col_2""": 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def lowerCamelCase__ ( tmp_path , embed_local_files ):
    """Parquet writing either embeds local image bytes or keeps the path."""
    import PIL.Image

    image_path = str(tmp_path / """test_image_rgb.jpg""" )
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(image_path , format="""png""" )  # NOTE(review): np.uinta is a mangling artifact (was uint8)
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output , features=Features({"""image""": Image()} ) , embed_local_files=embed_local_files ) as writer:
        writer.write({"""image""": image_path} )
        writer.finalize()
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    out = pa_table.to_pydict()
    if embed_local_files:
        # NOTE(review): the isinstance type was mangled away; a parquet "path"
        # field is a string — TODO confirm.
        assert isinstance(out["""image"""][0]["""path"""] , str )
        with open(image_path , """rb""" ) as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def lowerCamelCase__ ( ):
    """_build_writer drops user nullability info from the recorded schema.

    NOTE(review): the mangled `nullable=` argument is restored to False — a
    nullable=True field would make the final equality trivially true.
    """
    schema = pa.schema([pa.field("""col_1""" , pa.string() , nullable=False )] )
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer._build_writer(inferred_schema=schema )
    assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
| 12 |
"""simple docstring"""
def a__ ( collection ) -> list:
    """Circle sort: repeatedly compare/swap mirrored pairs until no swap occurs.

    Sorts ``collection`` in place and returns it. Fixes: every argument in the
    recursive calls had been collapsed to the single outer parameter name, and
    the swap targets were destroyed.
    """
    if len(collection ) < 2:
        return collection

    def circle_sort_util(collection , low , high ) -> bool:
        """One circle pass over collection[low:high+1]; True if anything swapped."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
if __name__ == "__main__":
    # Fixes: inputs were bound to throwaway names while `user_input`/`unsorted`
    # were referenced, and the sort was called under a name that no longer
    # exists (it was mangled to `a__`).
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(a__(unsorted))
| 291 | 0 |
def UpperCAmelCase ( set_a , set_b , alternative_union=False ):
    """Jaccard similarity |A∩B| / |A∪B| for sets or for ordered lists/tuples.

    With ``alternative_union`` the denominator is len(A) + len(B) instead.
    Returns None for unsupported input types. Fixes: duplicate parameter names
    (SyntaxError), isinstance checks that had lost their type arguments, and a
    dead duplicated return statement.
    """
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    # Fixes: both sets were bound to throwaway names while `set_a`/`set_b` were
    # referenced, and the function was called under a name that no longer exists
    # (it was mangled to `UpperCAmelCase`).
    set_a = {"""a""", """b""", """c""", """d""", """e"""}
    set_b = {"""c""", """d""", """e""", """f""", """h""", """i"""}
    print(UpperCAmelCase(set_a, set_b))
"""simple docstring"""
from collections.abc import Generator
def a__ ( ) -> Generator[int, None, None]:
    """Yield Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely.

    Fix: the loop body referenced undefined names ``a`` and ``b`` because the
    mangling renamed the bound variables but not their uses.
    """
    prev, curr = 0, 1
    while True:
        prev, curr = curr, prev + curr
        yield curr
def a__ ( n = 10_00 ) -> int:
    """Return the index of the first Fibonacci number with at least ``n`` digits.

    Project Euler 25-style counting with F1 = F2 = 1. Fixes: the loop condition
    read an undefined ``n`` and the generator was called under a name that no
    longer exists, so the recurrence is inlined here instead.
    """
    answer = 1
    prev, curr = 0, 1
    while True:
        prev, curr = curr, prev + curr
        if len(str(curr ) ) >= n:
            return answer + 1
        answer += 1
if __name__ == "__main__":
    # Fix: the solver was called under a name that no longer exists
    # (it was mangled to `a__`).
    print(a__(int(str(input()).strip())))
| 291 | 0 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    """Builds a tiny MRA config plus random inputs and runs shape checks
    for every task head (base model, MLM, QA, classification, choice)."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels and a matching tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a MraConfig sized from the tester's hyper-parameters."""
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        """Same config but with a larger vocab, as pipelines expect."""
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        """Like prepare_config_and_inputs, plus encoder states for cross-attention."""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the base model with/without masks and check output shape."""
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Forward the model in decoder mode with cross-attention inputs."""
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the masked-LM head produces (batch, seq, vocab) logits."""
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the QA head produces start/end logits of shape (batch, seq)."""
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the sequence-classification head produces (batch, num_labels)."""
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head produces (batch, seq, num_labels)."""
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the multiple-choice head produces (batch, num_choices)."""
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Tile every input along a new "choice" dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by ModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    """Standard common-model test suite for MRA (attention outputs are skipped
    because MRA does not expose attention probabilities)."""

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        # Re-run the base-model check for every supported position-embedding type.
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against published `uw-madison/mra-*` checkpoints."""

    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 317 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor that turns raw mono audio into padded log-mel
    spectrogram patches (`audio_values`) plus a patch-level mask (`audio_mask`)."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        """Configure spectrogram geometry and precompute the mel filter bank."""
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22_050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute a normalized log-mel spectrogram for one waveform."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        # Rescale dB values into [-1, 1].
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into a BatchFeature."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug."""
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 291 | 0 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two equal-length vectors.

    >>> euclidean(np.array([0, 0]), np.array([3, 4]))
    5.0
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each row of `value_array`, find the nearest row of `dataset`
    (Euclidean distance) and return `[nearest_vector, distance]` pairs.

    Raises:
        ValueError: if the arrays differ in rank or in row width.
        TypeError: if the arrays differ in dtype (or the shape is unusable).
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg)
    except IndexError:
        # 1-D inputs have no second shape axis; only rank mismatch is an error.
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape')

    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        # Linear scan: start with the first dataset row, keep the closest seen.
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine of the angle between two non-zero vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 7 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate that `device_map` assigns each block index in [0, num_blocks)
    to exactly one device.

    Raises:
        ValueError: if any block is duplicated, missing, or out of range.
    """
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks)
        )
def get_device_map(n_layers, devices):
    """Evenly split layer indices 0..n_layers-1 into contiguous chunks,
    one chunk per device, and return {device: [layer, ...]}."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
| 291 | 0 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowercase__ : str = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    # Whether to drop the inferred `label` column / metadata features.
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    """Folder-based dataset builder for audio files, optionally labeled by
    their parent directory name."""

    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
# File extensions recognized as audio (largely what `soundfile`/`librosa`
# can decode).
AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 324 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds a tiny RoFormer config plus random inputs for the Flax common tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Create random ids/masks and a matching tiny RoFormerConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by FlaxModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Flax common-model test suite for RoFormer."""

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Every head class must load from the published PyTorch checkpoint.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the published Chinese-base checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 291 | 0 |
'''simple docstring'''
# Characters are treated as base-`alphabet_size` digits; hashes are kept
# bounded modulo a large prime.
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text` (Rabin-Karp rolling hash).

    The hash of `pattern` is compared against the rolling hash of every
    window of `text`; on a hash hit the window is compared directly to
    rule out collisions.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """Self-test covering matches, misses, and non-ASCII input."""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 158 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely hidden-state sequence for `observations_space`
    under the given HMM (Viterbi dynamic programming).

    Raises:
        ValueError: if any argument is empty or of the wrong type/shape.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every viterbi() argument; raise on the first problem found."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Reject any falsy (empty/None) argument."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("""There's an empty parameter""")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Both spaces must be lists of strings."""
    _validate_list(observations_space, """observations_space""")
    _validate_list(states_space, """states_space""")


def _validate_list(_object: Any, var_name: str) -> None:
    """Raise ValueError unless `_object` is a list of strings."""
    if not isinstance(_object, list):
        msg = f'{var_name} must be a list'
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f'{var_name} must be a list of strings'
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Initial probs map to floats; the other two are nested str->float dicts."""
    _validate_dict(initial_probabilities, """initial_probabilities""", float)
    _validate_nested_dict(transition_probabilities, """transition_probabilities""")
    _validate_nested_dict(emission_probabilities, """emission_probabilities""")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Outer dict maps str -> dict; every inner dict maps str -> float."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Raise ValueError unless `_object` is a dict[str, value_type]."""
    if not isinstance(_object, dict):
        msg = f'{var_name} must be a dict'
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f'{var_name} all keys must be strings'
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = """nested dictionary """ if nested else """"""
        msg = f'{var_name} {nested_text}all values must be {value_type.__name__}'
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 291 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds tiny BlipText configs and random inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Give every row a random prefix of 1s followed by 0s so masks vary.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        """Build a small BlipTextConfig from the tester's hyper-parameters."""
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Instantiate the model and check output shapes with and without mask."""
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Repackage inputs as the dict shape the common tester mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """Model-level tests for TFBlipTextModel, driven by BlipTextModelTester."""

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # NOTE(review): the original flag names were mangled; these are the
    # conventional mixin switches for this test file — confirm against upstream.
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        # BlipText checkpoints legitimately miss some keys across frameworks.
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 209 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Dict = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq OPT checkpoint and normalize its state dict.

    Drops unused keys, renames layer-norm/projection keys to the HF layout,
    and splits fused ``qkv_proj`` weights into separate q/k/v projections.

    Args:
        checkpoint_path: path to the ``.pt`` checkpoint file.

    Returns:
        dict: state dict ready to load into an ``OPTModel``.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    # Some checkpoints nest the weights under a "model" key.
    if "model" in sd.keys():
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a fairseq OPT checkpoint to HuggingFace format and save it.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: output directory for the HF model.
        config: optional HF config name/path; a default ``OPTConfig`` is used
            when omitted.
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    # OPT checkpoints are fp16; keep the model in half precision for loading.
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 291 | 0 |
from scipy.stats import spearmanr
import datasets
# Module-level metric metadata consumed by the Spearmanr metric class below.
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}
    Example 2:
        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

_CITATION = r"""\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
             Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    """Spearman rank-order correlation metric backed by ``scipy.stats.spearmanr``."""

    def _info(self):
        # Metadata used by the `datasets` library to describe this metric.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return {"spearmanr": coeff} plus the p-value when requested."""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 222 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    """Save/load round-trip and behaviour tests for VisionTextDualEncoderProcessor."""

    def setUp(self):
        # Each test gets a fresh temp dir holding a tiny vocab + image-processor config.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 291 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = torch.device('cpu')
def prepare_img():
    """Download the standard COCO sample image used to sanity-check vision models."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    """Return the first five reference logits for a given SwiftFormer variant.

    Returns ``None`` for unrecognized names (callers pass a validated choice).
    """
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    """Move the entry ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    """Map original SwiftFormer state-dict keys to their HuggingFace names.

    Args:
        state_dict: original checkpoint state dict (only keys are read).

    Returns:
        list[tuple[str, str]]: (old_key, new_key) pairs, one per key.
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            # Numeric third component means a block inside a stage; otherwise a
            # stage-level module that only needs the encoder prefix.
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint to HuggingFace format.

    Loads the checkpoint, renames keys, verifies the first logits against
    reference values, and saves the HF model to ``pytorch_dump_folder_path``.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    # NOTE(review): "preprocessor_config" looks like a local path/identifier —
    # confirm it resolves where this script is run.
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 266 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's CLI: core count, training script, and its args."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program: captured verbatim via REMAINDER.
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()
def main():
    """Import the training script as a module and spawn it across TPU cores."""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus the
    # injected --tpu_num_cores flag.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
# Script entry point: launch the requested training script on TPU cores.
if __name__ == "__main__":
    main()
| 291 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    r"""
    Wraps a BridgeTower image processor and a Roberta tokenizer into a single
    processor so text and images can be prepared with one call.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and preprocess ``images``; return one merged encoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, order-preserving and deduplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 339 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : List[str] = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a SEW-D model."""

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the convolutional feature extractor.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 291 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class Data2VecTextConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a Data2VecText model."""

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2VecText."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the model's ONNX export inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 110 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
# Metric metadata strings. These were all bound to one throwaway name while
# the code below references `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and
# `_CITATION`; the intended names are restored here.
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""

_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
    - `0`: If there is a zero division, the return value is `0`.
    - `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
    Example 1-A simple example with some errors
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
        >>> print(results)
        {'recall': 0.6666666666666666}
    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
        >>> print(results)
        {'recall': 0.5}
    Example 3-The same example as Example 1, but with `sample_weight` included.
        >>> recall_metric = datasets.load_metric('recall')
        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
        >>> print(results)
        {'recall': 0.55}
    Example 4-A multiclass example, using different averages.
        >>> recall_metric = datasets.load_metric('recall')
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'recall': array([1., 0., 0.])}
"""

_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    """Recall metric backed by scikit-learn's `recall_score`."""

    def _info(self):
        # `datasets` invokes `_info()` for the metric's schema and metadata;
        # the multilabel configuration expects sequences of labels per example.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        """Compute recall; parameter semantics are documented in the module docstring."""
        # sklearn's signature is (y_true, y_pred), i.e. references first.
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        # A scalar result (size-1 array) is unwrapped to a plain float;
        # per-class results (average=None) stay as an array.
        return {"recall": float(score) if score.size == 1 else score}
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

# File names expected inside every DPR tokenizer repository; referenced by the
# tokenizer classes below as VOCAB_FILES_NAMES.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
# Pretrained-checkpoint lookup tables for the three DPR tokenizers.
# The class bodies below reference these exact names; previously every dict
# was rebound to the same throwaway identifier, leaving them all undefined.
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length supported by each checkpoint.
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

# Default tokenizer init kwargs per checkpoint.
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    """
    Fast DPR context-encoder tokenizer; behaves like `BertTokenizerFast`
    configured with the DPR context-encoder checkpoints.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    """
    Fast DPR question-encoder tokenizer; behaves like `BertTokenizerFast`
    configured with the DPR question-encoder checkpoints.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
# Result containers used by the reader tokenizer below; `DPRSpanPrediction`
# is constructed in `decode_best_spans`, so these must be bound under their
# own names (previously both were rebound to a throwaway identifier).
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

# Shared docstring injected into the reader tokenizer via add_start_docstrings /
# add_end_docstrings.
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:
        [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:
            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.
            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:
            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.
            [What are attention masks?](../glossary#attention-mask)
    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:
        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Mixin adding question+passage encoding and span decoding to a BERT tokenizer."""

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ):
        # Without passages, fall back to plain (question-only) tokenization.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        # With only one of titles/texts, encode it as the second segment.
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question is duplicated across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        # Encode "[CLS] question [SEP] title [SEP]" and the passage text
        # separately, then concatenate the id sequences per passage.
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans=16,
        max_answer_length=64,
        num_spans_per_passage=4,
    ):
        """
        Return up to `num_spans` best answer spans across all passages,
        ordered by passage relevance then span score.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        # Visit passages from most to least relevant.
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        """
        Score every candidate span up to `max_answer_length` tokens and return
        the `top_spans` highest-scoring, non-overlapping (start, end) intervals.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip any span overlapping an already-chosen one.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    """
    Fast DPR reader tokenizer: a `BertTokenizerFast` extended with the
    question+passage encoding and span-decoding helpers from the mixin.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """Map-style dataset that lazily applies a preprocess function to each item."""

    def __init__(self, dataset, process, params):
        # `process` is called as process(item, **params) on each access.
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    """
    Iterable that applies `infer(item, **params)` to each item of `loader`.

    When `loader_batch_size` is set, each inferred result is assumed to be a
    batch and is unrolled into `loader_batch_size` individual items that look
    like batch_size=1 outputs.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping for the batch currently being unrolled.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the current element of the unrolled batch, shaped like batch_size=1."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    """
    Iterator for `infer` functions that return an *iterator* per input item;
    flattens all sub-iterators into a single stream.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    """
    Iterator that regroups flattened chunk outputs back into lists: items are
    accumulated (possibly while unbatching) until one carries `is_last=True`.
    """

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    """Dataset view returning a single key of each underlying dict item."""

    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
    """Dataset view pairing two keys of each dict item as text / text_pair."""

    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table: maps submodule name -> public symbols. The final
# `_LazyModule(...)` call reads this dict, so every branch must extend the
# SAME mapping (previously each branch rebound a throwaway name, leaving
# `_import_structure` undefined at the bottom of the file).
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    """
    Return True if `n` is a pentagonal number.

    Inverts P(k) = k(3k - 1)/2: n is pentagonal iff (1 + sqrt(1 + 24n)) / 6
    is a positive integer.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    """
    Project Euler 44: find pentagonal numbers P_i and P_j (i <= j) whose sum
    and difference are both pentagonal, and return the difference P_j - P_i.

    Searches the first `limit - 1` pentagonal numbers; returns -1 if no such
    pair exists within that range.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a__ = logging.get_logger(__name__)
class snake_case ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Dict , *lowerCAmelCase : Dict , **lowerCAmelCase : str) -> str:
"""simple docstring"""
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , _a , )
super().__init__(*_a , **_a)
| 317 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(model, model_file):
    """
    Load a serialized Flax checkpoint file into a PyTorch model.

    Deserializes `model_file` with `flax.serialization.from_bytes` and hands
    the resulting state dict to `load_flax_weights_in_pytorch_model`.
    Raises OSError for git-lfs pointer files and EnvironmentError when the
    file cannot be deserialized at all.
    """
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        # Re-open as text to detect a git-lfs pointer file, which starts
        # with a "version" line instead of binary msgpack data.
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(model, flax_state)
def a__(pt_model, flax_state):
    """Load a (nested) Flax parameter dict into a PyTorch model.

    Converts Flax parameter names/layouts to their PyTorch equivalents
    (conv kernels transposed, dense kernels `.T`, `scale` -> `weight`, numeric
    suffixes `_0`..`_9` -> `.0`..`.9`), copies matching tensors into the model's
    state dict, and warns about unexpected / missing keys.

    NOTE(review): the incoming code declared both parameters with one name (a
    SyntaxError) and bound every local to `lowerCamelCase`; names below are
    reconstructed from their in-body uses (`pt_model.state_dict()`,
    `missing_keys.remove`, `unexpected_keys.append`, ...).
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv layer: Flax HWIO -> PyTorch OIHW
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # dense layer: transpose the kernel
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    return pt_model
| 291 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
# Generic type parameters for the LRU cache below: T = key type, U = value type.
# (The incoming code bound both TypeVars to the single name `lowercase_`, leaving
# `T` and `U` — used in every `Generic[T, U]` below — undefined.)
T = TypeVar("T")
U = TypeVar("U")
class A(Generic[T, U]):
    """
    Node of the doubly linked list backing the LRU cache: holds a key/value pair
    plus `prev`/`next` links, which stay None until the node is inserted.

    NOTE(review): the incoming `__init__` declared both parameters as
    `lowercase_` (a SyntaxError) while its body read `key`/`val`; the real
    parameter names are restored from the body.
    """

    def __init__(self, key, val) -> None:
        self.key = key
        self.val = val
        self.next = None  # link toward the rear sentinel once inserted
        self.prev = None  # link toward the head sentinel once inserted

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )
class A(Generic[T, U]):
    """
    Sentinel-based doubly linked list used by the LRU cache: `head` and `rear`
    are dummy nodes; real entries live between them, most-recently-used at the
    rear.

    NOTE(review): the incoming methods were both named `snake_case__` (so the
    second silently shadowed the first) while the cache below calls
    `self.list.add(...)` / `self.list.remove(...)`; the real names are restored
    from those call sites. `DoubleLinkedListNode` must resolve to the node
    class above (itself mis-named `A` in this excerpt) — confirm.
    """

    def __init__(self) -> None:
        self.head = DoubleLinkedListNode(None, None)
        self.rear = DoubleLinkedListNode(None, None)
        # Wire the two sentinels to each other so the list starts empty.
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node) -> None:
        """Link `node` just before the rear sentinel (most-recent position)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node):
        """Unlink `node` and return it, or return None if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class A(Generic[T, U]):
    """
    Least-recently-used cache built on the doubly linked list above, with a
    classmethod decorator for memoizing single-argument functions.

    NOTE(review): in the incoming code `get`, `put` and the decorator factory
    were all named `snake_case__` (only the last survived) and the class
    attribute was named `lowerCamelCase`; the real names are restored from the
    in-body call sites (`.get(args[0])`, `.put(args[0], ...)`,
    `cls.decorator_function_to_instance_map`). The factory name `decorator` has
    no in-file caller — confirm against upstream.
    """

    # Maps each decorated function to its dedicated cache instance (shared
    # across all instances, hence a class attribute).
    decorator_function_to_instance_map: dict = {}

    def __init__(self, capacity: int) -> None:
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key) -> bool:
        return key in self.cache

    def get(self, key):
        """Return the cached value for `key` (bumping it to most-recent), or None on a miss."""
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key, value) -> None:
        """Insert or update `key`, evicting the least-recently-used entry when full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Decorator factory: memoize a single-argument function with an LRU cache of `size`."""

        def cache_decorator_inner(func):
            def cache_decorator_wrapper(*args):
                if func not in cls.decorator_function_to_instance_map:
                    # `cls(size)` rather than a hard-coded class name, since this
                    # class's public name is mangled in this excerpt.
                    cls.decorator_function_to_instance_map[func] = cls(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info():
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 7 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # Sentencepiece unavailable: expose the name anyway so `slow_tokenizer_class`
    # below can still reference it.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

# NOTE(review): the incoming code bound every constant below to the single name
# `lowerCAmelCase`; the tokenizer class in this module references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which grounds those names. The segment
# constants follow the standard XLNet naming — confirm against upstream.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class __magic_name__(PreTrainedTokenizerFast):
    """
    Fast XLNet tokenizer backed by HuggingFace's *tokenizers* library.

    NOTE(review): the incoming class inherited from the undefined name
    `UpperCAmelCase__` (only `PreTrainedTokenizerFast`, imported above, fits),
    bound all five class attributes to one name `__UpperCamelCase`, and declared
    every method with duplicate `_a` parameters (a SyntaxError) under one shared
    method name. Attribute and method names are restored per the standard
    transformers fast-tokenizer contract — confirm against upstream.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"  # XLNet pads on the left
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3  # segment id used when padding
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving the slow vocab is only possible when a sentencepiece file is present.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: `X <sep> <cls>` or `A <sep> B <sep> <cls>`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token-type ids: 0 for the first sequence, 1 for the second, 2 for `<cls>`."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece vocab file into `save_directory`; return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 291 | 0 |
'''simple docstring'''
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Point the docs' custom.js at release `version`.

    Rewrites the `const stableVersion = ...` line and appends the new version to
    the `versionMapping` dictionary, then writes the file back.

    NOTE(review): the incoming code computed the new stable-version line but
    assigned it to a throwaway variable instead of `lines[index]`, so the stable
    version was never actually updated; it also referenced undefined names for
    the file path and the `version` parameter. All are fixed below.
    """
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end (i.e. on the line before the closing brace)
    lines[index - 1] += f' "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


# Backward-compatible alias for the mangled name this function previously carried.
a__ = update_custom_js

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 324 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __magic_name__ ( unittest.TestCase ):
    '''Pipeline tests for `text2text-generation` (encoder-decoder text generation).

    NOTE(review): this class looks machine-mangled. Both class attributes are
    bound to the same name `__UpperCamelCase` (the TF mapping silently
    overwrites the PT one), all four methods share the name `_lowerCAmelCase`
    (later defs shadow earlier ones), and several signatures declare duplicate
    `_a` parameters — duplicate argument names are a SyntaxError in Python.
    The boolean/keyword values replaced by `_a` cannot be reconstructed with
    confidence from this excerpt, so the code is left untouched; restore from
    the upstream test file.
    '''
    __UpperCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    __UpperCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    # NOTE(review): presumably `get_test_pipeline(self, model, tokenizer, processor)`;
    # the body also returns an undefined `generator` instead of the assigned local.
    def _lowerCAmelCase ( self , _a , _a , _a ):
        """Build the pipeline under test plus example inputs for the shared runner."""
        lowerCamelCase = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
        return generator, ["Something to write", "Something else"]
    # NOTE(review): presumably `run_pipeline_test(self, generator, _)`.
    def _lowerCAmelCase ( self , _a , _a ):
        """Exercise single-input, batched and invalid-input calls on the pipeline."""
        lowerCamelCase = generator("""Something there""" )
        self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
        lowerCamelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
        self.assertEqual(
            _a , [
                [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
                [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
            ] , )
        lowerCamelCase = generator(
            ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
        self.assertEqual(
            _a , [
                [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
                [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
            ] , )
        with self.assertRaises(_a ):
            generator(4 )
    @require_torch
    def _lowerCAmelCase ( self ):
        """Small-model smoke test on the PyTorch backend (deterministic decoding)."""
        lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
        # do_sample=False necessary for reproducibility
        lowerCamelCase = generator("""Something there""" , do_sample=_a )
        self.assertEqual(_a , [{"""generated_text""": """"""}] )
        lowerCamelCase = 3
        lowerCamelCase = generator(
            """Something there""" , num_return_sequences=_a , num_beams=_a , )
        lowerCamelCase = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(_a , _a )
        lowerCamelCase = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
        self.assertEqual(
            _a , [
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ] , )
        lowerCamelCase = generator.model.config.eos_token_id
        lowerCamelCase = """<pad>"""
        lowerCamelCase = generator(
            ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
        self.assertEqual(
            _a , [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def _lowerCAmelCase ( self ):
        """Small-model smoke test on the TensorFlow backend (deterministic decoding)."""
        lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
        # do_sample=False necessary for reproducibility
        lowerCamelCase = generator("""Something there""" , do_sample=_a )
        self.assertEqual(_a , [{"""generated_text""": """"""}] )
| 291 | 0 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowerCAmelCase_ ( UpperCAmelCase__ ,unittest.TestCase ):
    '''Tokenizer tests for `Wav2Vec2PhonemeCTCTokenizer` (phoneme-level CTC decoding).

    NOTE(review): this class looks machine-mangled. Nearly every method is named
    `_snake_case` (later defs shadow earlier ones), two signatures declare
    duplicate `_lowerCAmelCase` parameters (a SyntaxError in Python), the base
    class `UpperCAmelCase__` is undefined (presumably `TokenizerTesterMixin`,
    imported above), and `_a` appears where literals/arguments once were.
    The original keyword values cannot be reconstructed with confidence from
    this excerpt, so the code is left untouched; restore from the upstream
    test file.
    '''
    __lowerCamelCase : Optional[Any] = WavaVecaPhonemeCTCTokenizer
    __lowerCamelCase : str = False
    def _snake_case ( self ) -> List[Any]:
        """Write a phoneme vocab + special-tokens map into the temp dir for the tests."""
        super().setUp()
        _lowerCAmelCase = (
            "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
            "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
            "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
            "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
            "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
            "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
            "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
            "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
            "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
            "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
            "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
            "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
            "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" " )
        _lowerCAmelCase = dict(zip(_a , range(len(_a ) ) ) )
        _lowerCAmelCase = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(_a ) + "\n" )
    # NOTE(review): duplicate `_lowerCAmelCase` parameters below are a SyntaxError;
    # presumably `get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5)`.
    def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=20 , _lowerCAmelCase=5 ) -> int:
        """Produce a (text, ids) pair whose round-trip through the tokenizer is consistent."""
        _lowerCAmelCase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_a )) for i in range(len(_a ) )]
        _lowerCAmelCase = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_a ) , _a ) )
        if max_length is not None and len(_a ) > max_length:
            _lowerCAmelCase = toks[:max_length]
        if min_length is not None and len(_a ) < min_length and len(_a ) > 0:
            while len(_a ) < min_length:
                _lowerCAmelCase = toks + toks
        # toks_str = [t[1] for t in toks]
        _lowerCAmelCase = [t[0] for t in toks]
        # Ensure consistency
        _lowerCAmelCase = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
        if " " not in output_txt and len(_a ) > 1:
            _lowerCAmelCase = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_a )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_a )
            )
        if with_prefix_space:
            _lowerCAmelCase = " " + output_txt
        _lowerCAmelCase = tokenizer.encode(_a , add_special_tokens=_a )
        return output_txt, output_ids
    def _snake_case ( self , **_lowerCAmelCase ) -> str:
        """Instantiate a tokenizer from the temp-dir vocab, merging the special-tokens map."""
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_a )
    def _snake_case ( self ) -> Optional[int]:
        """Added tokens should be appended at the end of the vocab and be encodable."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        # check adding a single token
        tokenizer.add_tokens("xxx" )
        _lowerCAmelCase = tokenizer("m xxx ɪ" , do_phonemize=_a ).input_ids
        self.assertEqual(_a , [13, 392, 17] ) # xxx should be last token
        tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
        _lowerCAmelCase = tokenizer("m aaa ɪ ccc" , do_phonemize=_a ).input_ids
        self.assertEqual(_a , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
        _lowerCAmelCase = tokenizer("maɪ c" , do_phonemize=_a ).input_ids
        self.assertEqual(_a , [3, 200] ) # mai should be <unk> (=3)
    def _snake_case ( self ) -> Optional[int]:
        """`phonemize` should produce the expected espeak en-us phoneme string."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        _lowerCAmelCase = "Hello how are you"
        _lowerCAmelCase = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
        self.assertEqual(_a , "h ə l oʊ h aʊ ɑːɹ j uː" )
    def _snake_case ( self ) -> Any:
        """Encoding raw text should match encoding its phonemized form with do_phonemize off."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        _lowerCAmelCase = "Hello how are you"
        _lowerCAmelCase = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(_a ).input_ids , tokenizer(_a , do_phonemize=_a ).input_ids )
    def _snake_case ( self ) -> Optional[int]:
        """Encode + decode should round-trip back to the phonemized string."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        _lowerCAmelCase = "Hello how are you"
        _lowerCAmelCase = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
        _lowerCAmelCase = tokenizer.decode(tokenizer(_a ).input_ids )
        self.assertEqual(_a , _a )
    def _snake_case ( self ) -> List[str]:
        """CTC decode collapses repeats and drops pad tokens; batch_decode matches decode."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        _lowerCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        _lowerCAmelCase = tokenizer.decode(sample_ids[0] )
        _lowerCAmelCase = tokenizer.batch_decode(_a )
        self.assertEqual(_a , batch_tokens[0] )
        self.assertEqual(_a , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
    def _snake_case ( self ) -> Optional[Any]:
        """`phonemize` inserts the word delimiter token between words when configured."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        _lowerCAmelCase = "Hello how are you"
        _lowerCAmelCase = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
        self.assertEqual(_a , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
    def _snake_case ( self ) -> Union[str, Any]:
        """Encoding with word delimiters matches encoding the pre-phonemized text."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        _lowerCAmelCase = "Hello how are you"
        _lowerCAmelCase = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(_a ).input_ids , tokenizer(_a , do_phonemize=_a ).input_ids )
    def _snake_case ( self ) -> List[str]:
        """Decoding can keep or filter the word delimiter token."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        _lowerCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        _lowerCAmelCase = tokenizer.decode(sample_ids[0] )
        _lowerCAmelCase = tokenizer.batch_decode(_a )
        self.assertEqual(_a , batch_tokens[0] )
        self.assertEqual(_a , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
        # decode with no word_del_token filter
        _lowerCAmelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_a )
        _lowerCAmelCase = tokenizer.batch_decode(_a , filter_word_delimiter_token=_a )
        self.assertEqual(_a , batch_tokens[0] )
        self.assertEqual(_a , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
    def _snake_case ( self ) -> Dict:
        """Round trip with word-delimiter filtering enabled."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        _lowerCAmelCase = "Hello how are you"
        _lowerCAmelCase = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
        _lowerCAmelCase = tokenizer.decode(tokenizer(_a ).input_ids , filter_word_delimiter_token=_a )
        self.assertEqual(_a , _a )
    def _snake_case ( self ) -> Tuple:
        """Round trip with word-delimiter filtering disabled (delimiters stripped manually)."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        _lowerCAmelCase = "Hello how are you"
        _lowerCAmelCase = tokenizer.phonemize(_a , phonemizer_lang="en-us" )
        _lowerCAmelCase = tokenizer.decode(tokenizer(_a ).input_ids , filter_word_delimiter_token=_a )
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , _a )
    def _snake_case ( self ) -> Optional[Any]:
        """Different phonemizer languages must give different encodings/decodings."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=_a )
        _lowerCAmelCase = "Hello how are you"
        _lowerCAmelCase = tokenizer(_a , phonemizer_lang="en-us" ).input_ids
        _lowerCAmelCase = tokenizer(_a , phonemizer_lang="fr-fr" ).input_ids
        self.assertNotEqual(_a , _a )
        _lowerCAmelCase = tokenizer.decode(_a )
        _lowerCAmelCase = tokenizer.decode(_a )
        self.assertEqual(_a , "h ə l oʊ h aʊ ɑːɹ j uː" )
        self.assertEqual(_a , "ɛ l o h aʊ a ʁ j u" )
    def _snake_case ( self ) -> Optional[int]:
        """Tokenizer lower-cases input, so mixed case encodes like lower case."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        _lowerCAmelCase = "Hello how Are you"
        _lowerCAmelCase = "hello how are you"
        _lowerCAmelCase = tokenizer(_a ).input_ids
        _lowerCAmelCase = tokenizer(_a ).input_ids
        self.assertEqual(_a , _a )
    def _snake_case ( self ) -> Optional[Any]:
        """Added tokens and special tokens survive batch decoding."""
        _lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        tokenizer.add_tokens(["!", "?"] )
        tokenizer.add_special_tokens({"cls_token": "$$$"} )
        # fmt: off
        _lowerCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on
        _lowerCAmelCase = tokenizer.batch_decode(_a )
        self.assertEqual(_a , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
    # NOTE(review): duplicate `_lowerCAmelCase` parameters below are a SyntaxError;
    # presumably `get_from_offsets(offsets, key)`.
    @staticmethod
    def _snake_case ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
        """Project one field out of a list of offset dicts."""
        _lowerCAmelCase = [d[key] for d in offsets]
        return retrieved_list
    def _snake_case ( self ) -> Union[str, Any]:
        """Character offsets returned by decode must align with the decoded text."""
        _lowerCAmelCase = self.get_tokenizer(word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        _lowerCAmelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        _lowerCAmelCase = tokenizer.decode(_a , output_char_offsets=_a , filter_word_delimiter_token=_a )
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys() ) , 2 )
        self.assertTrue("text" in outputs )
        self.assertTrue("char_offsets" in outputs )
        self.assertTrue(isinstance(_a , _a ) )
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
    def _snake_case ( self ) -> Optional[Any]:
        """Batch decode with offsets must agree with per-sample decode with offsets."""
        _lowerCAmelCase = self.get_tokenizer(word_delimiter_token="|" )
        def check_list_tuples_equal(_lowerCAmelCase , _lowerCAmelCase ):
            self.assertTrue(isinstance(_a , _a ) )
            self.assertTrue(isinstance(outputs_list[0] , _a ) )
            # transform list to ModelOutput
            _lowerCAmelCase = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
            def recursive_check(_lowerCAmelCase , _lowerCAmelCase ):
                if isinstance(_a , _a ):
                    [recursive_check(_a , _a ) for la, la in zip(_a , _a )]
                    self.assertEqual(_a , _a )
            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
        # fmt: off
        _lowerCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        _lowerCAmelCase = tokenizer.batch_decode(_a , output_char_offsets=_a )
        _lowerCAmelCase = [tokenizer.decode(_a , output_char_offsets=_a ) for ids in sample_ids]
        check_list_tuples_equal(_a , _a )
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
    def _snake_case ( self ) -> Any:
        pass
    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
    def _snake_case ( self ) -> Any:
        pass
    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
    def _snake_case ( self ) -> Union[str, Any]:
        pass
    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
    def _snake_case ( self ) -> List[str]:
        pass
    def _snake_case ( self ) -> Dict:
        """Adding tokens and special tokens grows the vocab and they encode to new ids."""
        _lowerCAmelCase = self.get_tokenizers(do_lower_case=_a )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                _lowerCAmelCase = tokenizer.vocab_size
                _lowerCAmelCase = len(_a )
                self.assertNotEqual(_a , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                _lowerCAmelCase = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                _lowerCAmelCase = tokenizer.add_tokens(_a )
                _lowerCAmelCase = tokenizer.vocab_size
                _lowerCAmelCase = len(_a )
                self.assertNotEqual(_a , 0 )
                self.assertEqual(_a , _a )
                self.assertEqual(_a , len(_a ) )
                self.assertEqual(_a , all_size + len(_a ) )
                _lowerCAmelCase = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=_a )
                self.assertGreaterEqual(len(_a ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                _lowerCAmelCase = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                _lowerCAmelCase = tokenizer.add_special_tokens(_a )
                _lowerCAmelCase = tokenizer.vocab_size
                _lowerCAmelCase = len(_a )
                self.assertNotEqual(_a , 0 )
                self.assertEqual(_a , _a )
                self.assertEqual(_a , len(_a ) )
                self.assertEqual(_a , all_size_a + len(_a ) )
                _lowerCAmelCase = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=_a )
                self.assertGreaterEqual(len(_a ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def _snake_case ( self ) -> List[str]:
        pass
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def _snake_case ( self ) -> int:
        pass
    def _snake_case ( self ) -> List[str]:
        """`convert_tokens_to_string` returns a dict whose "text" field is a string."""
        _lowerCAmelCase = self.get_tokenizers(fast=_a , do_lower_case=_a )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                _lowerCAmelCase = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                _lowerCAmelCase = tokenizer.convert_tokens_to_string(_a )
                self.assertIsInstance(output["text"] , _a )
| 158 |
"""simple docstring"""
def a__ ( input_str , use_pascal = False ) -> str:
    """Convert a snake_case string to camelCase (or PascalCase).

    Args:
        input_str: the snake_case string to convert.
        use_pascal: when True, capitalize the first word too (PascalCase).

    Returns:
        The converted string.

    Raises:
        ValueError: if ``input_str`` is not a str or ``use_pascal`` is not a bool.

    NOTE(review): the original ``def`` declared the same parameter name twice
    (a SyntaxError) while the body already referred to ``input_str`` and
    ``use_pascal``; those names are restored here.
    """
    if not isinstance(input_str, str):
        msg = f'Expected string as input, found {type(input_str)}'
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f'Expected boolean as use_pascal parameter, found {type(use_pascal)}'
        raise ValueError(msg)
    words = input_str.split("_")
    # camelCase keeps the first word as-is; PascalCase capitalizes every word.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    # word[:1] (not word[0]) so empty segments from "a__b" or "" don't raise IndexError.
    capitalized_words = [word[:1].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod

    testmod()
| 291 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the GLUE/MRPC example.
# NOTE(review): the obfuscated original bound both values to the same name
# `_a`, losing the first; the canonical accelerate-example names are restored.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build the GLUE/MRPC train and eval dataloaders.

    Args:
        accelerator: the ``Accelerator`` whose distributed / mixed-precision
            state drives the padding strategy below.
        batch_size: per-device training batch size.

    Returns:
        ``(train_dataloader, eval_dataloader)``

    NOTE(review): the obfuscated original declared the same parameter name twice
    (a SyntaxError) and used undefined placeholder names throughout; the caller
    below already invokes ``get_dataloaders(...)``, so the canonical accelerate
    example layout is restored here — confirm against upstream.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    # 32 is the fixed evaluation batch size (EVAL_BATCH_SIZE in the upstream example).
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=32
    )
    return train_dataloader, eval_dataloader
# For testing only
# For testing only: swap in lightweight mocked dataloaders when requested.
# NOTE(review): the obfuscated original assigned the mock to a throwaway name
# (`_a`), so the override never took effect; it must rebind `get_dataloaders`.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train/evaluate BERT on GLUE MRPC, retrying with smaller batch sizes on OOM.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed`` and ``batch_size``.
        args: parsed CLI namespace providing ``cpu`` and ``mixed_precision``.

    NOTE(review): the obfuscated original declared the same parameter name twice
    (a SyntaxError) and referenced undefined placeholders; reconstructed to the
    canonical accelerate ``by_feature/memory.py`` layout — confirm upstream.
    """
    # For testing only: shorten the run under the mocked-dataloader harness.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """Parse CLI arguments and launch the training run.

    NOTE(review): the obfuscated original used undefined placeholders for the
    argparse ``type``/``default`` and for the call arguments, and the
    ``__main__`` guard called an undefined ``main``; restored to the canonical
    example — confirm upstream.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 209 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCAmelCase : int = logging.get_logger(__name__)
class __magic_name__(BaseImageProcessor):
    """Image processor: optional resize -> center-crop -> rescale -> normalize,
    plus semantic-segmentation post-processing.

    NOTE(review): the base class was an undefined placeholder; ``BaseImageProcessor``
    (imported above) is the evident intent. Every method was obfuscated to the same
    name with duplicated ``_a`` parameters (SyntaxErrors); the canonical transformers
    image-processor method names — which ``preprocess`` already calls via
    ``self.resize``/``self.center_crop``/... — are restored. Confirm upstream.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        """Store default preprocessing configuration.

        Defaults: shortest-edge-256 resize, 224x224 center crop, 1/255 rescale,
        ImageNet-standard mean/std normalization.
        """
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so the image's shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level transform imported above.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline over one image or a list of images.

        Each argument falls back to the value stored at construction time.
        Returns a ``BatchFeature`` with a ``pixel_values`` entry.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Convert raw model logits into per-image semantic segmentation maps.

        If ``target_sizes`` is given, each logits map is bilinearly resized to
        the corresponding size before the per-pixel argmax.
        """
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 291 | 0 |
import os
import string
import sys
# Flag bit OR-ed onto decoded arrow-key codes so they don't collide with ASCII.
ARROW_KEY_FLAG = 1 << 8

# NOTE(review): the obfuscated original bound every constant here to one
# throwaway name while the functions below reference KEYMAP / ARROW_KEY_FLAG /
# WIN_CH_BUFFER / WIN_KEYMAP; the canonical names are restored — confirm upstream.
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# Arrow-key range consulted by get_character(); the obfuscated original bound
# these values to a throwaway name instead of KEYMAP entries.
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Pending translated keystrokes, and the raw-scan-code -> key-code map.
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Map ASCII digits to their own codes so they pass straight through.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Read one raw keystroke from the terminal (Windows msvcrt or POSIX termios).

    NOTE(review): both functions in this module were obfuscated to the name ``A``
    (the second shadowing the first) while the second calls ``get_raw_chars()``;
    the canonical name is restored so that call resolves — confirm upstream.
    """
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            # Raw mode: deliver each keypress immediately, no echo/line buffering.
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Read one keypress and decode escape sequences into KEYMAP codes.

    Returns the printable character, or an arrow-key code (with ARROW_KEY_FLAG
    set), or ``KEYMAP["undefined"]``.

    NOTE(review): renamed from the obfuscated ``A`` (which collided with the
    previous function) to the canonical name — confirm upstream.
    """
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            # CSI arrow sequence: ESC [ A..D -> flagged arrow-key code.
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 222 |
"""simple docstring"""
import operator as op
# NOTE(review): the obfuscated original rebound a single name for every
# constant below, losing all but the last value; the canonical accelerate
# constant names are restored — confirm against upstream accelerate.utils.constants.

# Checkpoint-file names used by Accelerator.save_state / load_state.
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"

# Minimum versions / instance types for SageMaker model-parallel launches.
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]

# Valid string choices for the FSDP plugin configuration.
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"

DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]

# String comparison operators -> callables, for version checks.
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
# NOTE(review): name reconstructed for this value set — verify against upstream.
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 291 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
# NOTE(review): the obfuscated original rebound one name for all four mappings,
# losing the first three; the class below references the canonical constant
# names, restored here — confirm against upstream transformers.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Download URLs for each pretrained ELECTRA checkpoint's vocab / tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (positional embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}
class snake_case(PreTrainedTokenizerFast):
    """Fast (Rust-backed) ELECTRA tokenizer; WordPiece, same layout as BertTokenizerFast.

    NOTE(review): the base class was an undefined placeholder
    (``PreTrainedTokenizerFast``, imported above, is the evident intent), all five
    class attributes shared one obfuscated name, the ``__init__`` repeated one
    parameter name (a SyntaxError), and all three methods shared one name; the
    canonical transformers layout is restored — confirm upstream.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its saved state disagrees with the
        # options requested here (lowercasing, accent stripping, CJK handling).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return segment ids: 0 for the first sequence (with specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the WordPiece vocabulary; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 266 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """Builds tiny ViTMSN configs and inputs for the model tests.

    NOTE(review): renamed from the obfuscated ``__magic_name__`` (which collided
    with the test class below); the test class instantiates ``ViTMSNModelTester``,
    so this is the evident intent. The ``__init__`` repeated one parameter name
    for every argument (a SyntaxError); conventional tester parameter names are
    restored from the defaults — confirm upstream.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels if enabled) plus a tiny config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward the base model and verify the hidden-state shape."""
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward the classification head (RGB and greyscale) and check logits."""
        num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print("Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ViTMSN.

    NOTE(review): the obfuscated original named this class identically to the
    tester above and left its bases / attributes / method names as placeholders;
    the conventional transformers test layout, matching the visible call sites
    (``ViTMSNModelTester``, ``self.model_tester``, ``self.config_tester``), is
    restored — confirm upstream.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    NOTE(review): renamed from the obfuscated ``a__`` — the integration test
    below calls ``prepare_img()``, so this is the evident intent.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released facebook/vit-msn-small weights.

    NOTE(review): class name and the ``_a`` placeholder arguments are restored
    to the conventional transformers layout — confirm upstream.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        # Seed so the randomly-initialized classifier head is reproducible.
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0_803, -0.4_454, -0.2_375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 291 | 0 |
import operator as op

# NOTE(review): every constant below is bound to the same mangled identifier
# `UpperCAmelCase__`, so each assignment clobbers the previous one. The values
# match accelerate's checkpoint/launch constants (SCALER_NAME, MODEL_NAME,
# FSDP_SHARDING_STRATEGY, TORCH_LAUNCH_PARAMS, ...) — restore the distinct
# names from accelerate.utils.constants before relying on this module.

# Checkpoint artefact file names used when saving/loading training state.
UpperCAmelCase__ = """scaler.pt"""
UpperCAmelCase__ = """pytorch_model"""
UpperCAmelCase__ = """random_states"""
UpperCAmelCase__ = """optimizer"""
UpperCAmelCase__ = """scheduler"""
UpperCAmelCase__ = """pytorch_model.bin"""
UpperCAmelCase__ = """pytorch_model.bin.index.json"""
UpperCAmelCase__ = """model.safetensors"""
UpperCAmelCase__ = """model.safetensors.index.json"""
# SageMaker minimum framework versions and parallel-capable instance types.
UpperCAmelCase__ = """1.10.2"""
UpperCAmelCase__ = """py38"""
UpperCAmelCase__ = """4.17.0"""
UpperCAmelCase__ = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
# FSDP configuration option names.
UpperCAmelCase__ = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
UpperCAmelCase__ = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
UpperCAmelCase__ = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
UpperCAmelCase__ = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
UpperCAmelCase__ = """2.0.1"""
# DeepSpeed multi-node launchers and torch.compile modes.
UpperCAmelCase__ = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
UpperCAmelCase__ = ["""default""", """reduce-overhead""", """max-autotune"""]
# Map comparison-operator strings to their `operator` module functions.
UpperCAmelCase__ = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCAmelCase__ = [
    """nnodes""",
    """nproc_per_node""",
    """rdzv_backend""",
    """rdzv_endpoint""",
    """rdzv_id""",
    """rdzv_conf""",
    """standalone""",
    """max_restarts""",
    """monitor_interval""",
    """start_method""",
    """role""",
    """module""",
    """m""",
    """no_python""",
    """run_path""",
    """log_dir""",
    """r""",
    """redirects""",
    """t""",
    """tee""",
    """node_rank""",
    """master_addr""",
    """master_port""",
]
# Distributed types that run on CUDA vs XPU devices.
UpperCAmelCase__ = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
UpperCAmelCase__ = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 339 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ) -> List[Any]:
lowerCamelCase = lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
lowerCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
lowerCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
lowerCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
lowerCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ) -> List[str]:
if split_mlp_wi:
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
lowerCamelCase = (wi_a, wi_a)
else:
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple:
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False) -> Dict:
    """Convert a loaded T5X checkpoint tree into a flat PyTorch-style param dict.

    Args:
        variables: checkpoint tree as returned by ``checkpoints.load_tax_checkpoint``.
        num_layers: number of encoder (and decoder) blocks to convert.
        is_encoder_only: skip the decoder stack when True.
        scalable_attention: umT5-style checkpoints carry a relative-position
            bias per layer instead of a single shared one.

    NOTE(review): the target key names below follow the standard T5
    ``encoder.block.{i}.layer.{n}`` state-dict layout; the mangled original had
    lost them entirely, so they were reconstructed — confirm against the
    upstream umT5 conversion script.
    """
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[F'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[F'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[F'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[F'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F'encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
                old, i, "encoder").T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        # classic T5: one shared relative-position bias per stack, stored on block 0
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder").T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder").T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[F'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[F'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[F'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[F'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[F'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[F'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F'decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
                    old, i, "decoder").T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def a__ ( snake_case__ , snake_case__ ) -> Optional[int]:
lowerCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCamelCase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCamelCase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowerCamelCase = state_dict["""shared.weight"""]
return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention) -> List[Any]:
    """Load a T5X checkpoint, convert it, and copy the weights into *model*.

    Fix: all five parameters previously shared one mangled name (SyntaxError)
    and ``strict`` was an undefined placeholder; strict loading is intended so
    any missing/unexpected key fails loudly.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
) -> str:
    """Build a (UM)T5 model from *config_file*, load T5X weights, and save it.

    Fix: parameters previously shared one mangled name, and every call used
    undefined placeholders; names are fixed by the ``__main__`` call site.
    """
    config = MTaConfig.from_json_file(config_file)
    print(F'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("""Done""" )
if __name__ == "__main__":
    # CLI entry point for the T5X -> PyTorch conversion.
    # Fixes: the parser/args were bound to throwaway names while the code below
    # referenced `parser`/`args`, and the checkpoint path was read from
    # `args.tax_checkpoint_path` although argparse stores `--t5x_checkpoint_path`
    # under the dest `t5x_checkpoint_path`.
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
    )
    parser.add_argument(
        """--scalable_attention""",
        action="""store_true""",
        help="""Whether the model uses scaled attention (umt5 model)""",
        default=False,
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 291 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level constants for the CTRL tokenizer.
# Fix: all five objects were bound to one mangled name `lowerCAmelCase`,
# clobbering each other, while the tokenizer class below references them by
# their real names (logger, VOCAB_FILES_NAMES, CONTROL_CODES, ...).
logger = logging.get_logger(__name__)

# File names expected inside a saved vocabulary directory.
VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
}

# Download locations for the pretrained CTRL vocabulary.
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
    """merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}

# Maximum input length (in tokens) for each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """ctrl""": 256,
}

# CTRL "control code" prompt tokens and their vocabulary ids.
CONTROL_CODES = {
    """Pregnancy""": 16_8629,
    """Christianity""": 7675,
    """Explain""": 10_6423,
    """Fitness""": 6_3440,
    """Saving""": 6_3163,
    """Ask""": 2_7171,
    """Ass""": 9_5985,
    """Joke""": 16_3509,
    """Questions""": 4_5622,
    """Thoughts""": 4_9605,
    """Retail""": 5_2342,
    """Feminism""": 16_4338,
    """Writing""": 1_1992,
    """Atheism""": 19_2263,
    """Netflix""": 4_8616,
    """Computing""": 3_9639,
    """Opinion""": 4_3213,
    """Alone""": 4_4967,
    """Funny""": 5_8917,
    """Gaming""": 4_0358,
    """Human""": 4088,
    """India""": 1331,
    """Joker""": 7_7138,
    """Diet""": 3_6206,
    """Legal""": 1_1859,
    """Norman""": 4939,
    """Tip""": 7_2689,
    """Weight""": 5_2343,
    """Movies""": 4_6273,
    """Running""": 2_3425,
    """Science""": 2090,
    """Horror""": 3_7793,
    """Confession""": 6_0572,
    """Finance""": 1_2250,
    """Politics""": 1_6360,
    """Scary""": 19_1985,
    """Support""": 1_2654,
    """Technologies""": 3_2516,
    """Teenage""": 6_6160,
    """Event""": 3_2769,
    """Learned""": 6_7460,
    """Notion""": 18_2770,
    """Wikipedia""": 3_7583,
    """Books""": 6665,
    """Extract""": 7_6050,
    """Confessions""": 10_2701,
    """Conspiracy""": 7_5932,
    """Links""": 6_3674,
    """Narcissus""": 15_0425,
    """Relationship""": 5_4766,
    """Relationships""": 13_4796,
    """Reviews""": 4_1671,
    """News""": 4256,
    """Translation""": 2_6820,
    """multilingual""": 12_8406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a sequence of symbols).

    Fix: the original bound every intermediate to one mangled name while the
    loop body referenced the undefined ``pairs``/``prev_char``; the tokenizer
    below calls this as ``get_pairs``, fixing the real name. The redundant
    final ``set(...)`` copy was dropped.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _a ( PreTrainedTokenizer ):
    """CTRL tokenizer: BPE over whitespace-split words, with control-code prompts.

    Fixes: the base class and the class attributes required by the
    ``PreTrainedTokenizer`` contract were mangled to undefined names, method
    names no longer matched the hooks the base class calls (``_tokenize``,
    ``save_vocabulary``, ...), and locals such as ``self.encoder`` /
    ``first``/``second`` were assigned to throwaway names while still being
    referenced. Restored per the standard CTRL tokenizer layout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        """Load the vocab/merges files and build encoder, decoder and BPE ranks."""
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}  # memoizes bpe() results per word

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to one word; returns '@@ '-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # mark the end of the word so merges can distinguish word-final symbols
        word = tuple(list(word[:-1]) + [word[-1] + '''</w>'''])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # merge the lowest-ranked (most frequent) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''' )))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '''@@ '''.join(word)
        word = word[:-4]  # strip the trailing '</w>' marker
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split *text* on whitespace and BPE-encode each word."""
        split_tokens = []
        words = re.findall(r'''\S+\n?''', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(''' ''' )))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join subword tokens back into plain text by removing '@@ ' markers."""
        out_string = ''' '''.join(tokens).replace('''@@ ''', '''''' ).strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into *save_directory*; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(vocab_file, '''w''', encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''' )
        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    # ranks should be 0..n-1; a gap means the merges file is corrupt
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''' )
                index += 1
        return vocab_file, merge_file
| 110 |
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list, item) -> bool:
    """Return True if *item* occurs in the sorted list *a_list* (recursive halving).

    Fix: the original's two parameters shared one mangled name (SyntaxError),
    ``midpoint`` was assigned to a throwaway name, and both the recursive calls
    and the ``__main__`` block already refer to ``binary_search``.
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    # Interactive driver: read a comma-separated sorted sequence and a target,
    # then report whether binary_search finds the target.
    # NOTE(review): variable names look mangled (all bound to `lowerCAmelCase`);
    # the search function must be exposed as `binary_search` for this to run.
    lowerCAmelCase : List[Any] = input("""Enter numbers separated by comma:\n""").strip()
    lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(""",""")]
    lowerCAmelCase : Optional[int] = int(input("""Enter the number to be found in the list:\n""").strip())
    lowerCAmelCase : Union[str, Any] = """""" if binary_search(sequence, target) else """not """
    print(F"""{target} was {not_str}found in {sequence}""")
| 291 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    """Builds tiny VideoMAE configs and inputs for the unit tests below.

    Fixes: ``__init__`` declared every parameter with the same mangled name
    (SyntaxError) and assigned to a throwaway local instead of ``self.*``;
    method and class names are restored to match the call sites in the test
    class (``VideoMAEModelTester(self)``, ``prepare_config_and_inputs`` ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one tiny batch."""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small VideoMAEConfig from the tester's hyperparameters."""
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward the base model and check the hidden-state shape."""
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        """Forward the pretraining model with a shared mask and check the logits shape."""
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase__( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase):
    """Unit-test suite for VideoMAE (model, pretraining, video classification).

    NOTE(review): identifiers here look machine-mangled — the class name, the
    class attributes (`UpperCAmelCase__`, presumably `all_model_classes`,
    `pipeline_model_mapping`, feature flags) and every method name
    (`lowerCAmelCase__`) collide, and placeholders like `_a` are undefined.
    Restore the original names before running; only comments are added here.
    """

    # Presumably: all_model_classes, pipeline_model_mapping, and four boolean
    # feature flags of the common tester mixin — TODO confirm.
    UpperCAmelCase__ : Optional[Any] = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    UpperCAmelCase__ : Optional[int] = (
        {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    UpperCAmelCase__ : List[Any] = False
    UpperCAmelCase__ : str = False
    UpperCAmelCase__ : Union[str, Any] = False
    UpperCAmelCase__ : Optional[Any] = False

    # setUp: build the model tester and config tester.
    def lowerCAmelCase__ ( self: Tuple ):
        __lowerCamelCase = VideoMAEModelTester(self )
        __lowerCamelCase = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )

    # Prepare inputs for a given model class; pretraining needs a shared
    # bool_masked_pos, classification needs integer labels.
    def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Any=False ):
        __lowerCamelCase = copy.deepcopy(_a )
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            __lowerCamelCase = torch.ones((self.model_tester.num_masks,) )
            __lowerCamelCase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            __lowerCamelCase = mask.expand(self.model_tester.batch_size , -1 ).bool()
            __lowerCamelCase = bool_masked_pos.to(_a )
        if return_labels:
            if model_class in [
                *get_values(_a ),
            ]:
                __lowerCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_a )
        return inputs_dict

    # Run the shared config sanity checks.
    def lowerCAmelCase__ ( self: Optional[int] ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
    def lowerCAmelCase__ ( self: int ):
        pass

    # The input embedding should be an nn.Module; there is no output embedding.
    def lowerCAmelCase__ ( self: Dict ):
        __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase = model_class(_a )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __lowerCamelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_a , nn.Linear ) )

    # forward() must take pixel_values as its first argument.
    def lowerCAmelCase__ ( self: List[str] ):
        __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase = model_class(_a )
            __lowerCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCamelCase = [*signature.parameters.keys()]
            __lowerCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _a )

    def lowerCAmelCase__ ( self: Union[str, Any] ):
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )

    def lowerCAmelCase__ ( self: Optional[int] ):
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*_a )

    @slow
    def lowerCAmelCase__ ( self: str ):
        # Smoke-test loading the first pretrained checkpoint from the Hub.
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase = VideoMAEModel.from_pretrained(_a )
            self.assertIsNotNone(_a )

    # Attention outputs: checked via argument, via config, and as the last
    # element of the full output tuple. Pretraining models only attend over
    # the visible (unmasked) patches.
    def lowerCAmelCase__ ( self: Optional[int] ):
        if not self.has_attentions:
            pass
        else:
            __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
            __lowerCamelCase = True
            for model_class in self.all_model_classes:
                __lowerCamelCase = self.model_tester.seq_length - self.model_tester.num_masks
                __lowerCamelCase = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                __lowerCamelCase = True
                __lowerCamelCase = False
                __lowerCamelCase = True
                __lowerCamelCase = model_class(_a )
                model.to(_a )
                model.eval()
                with torch.no_grad():
                    __lowerCamelCase = model(**self._prepare_for_class(_a , _a ) )
                __lowerCamelCase = outputs.attentions
                self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                __lowerCamelCase = True
                __lowerCamelCase = model_class(_a )
                model.to(_a )
                model.eval()
                with torch.no_grad():
                    __lowerCamelCase = model(**self._prepare_for_class(_a , _a ) )
                __lowerCamelCase = outputs.attentions
                self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
                __lowerCamelCase = len(_a )
                # Check attention is always last and order is fine
                __lowerCamelCase = True
                __lowerCamelCase = True
                __lowerCamelCase = model_class(_a )
                model.to(_a )
                model.eval()
                with torch.no_grad():
                    __lowerCamelCase = model(**self._prepare_for_class(_a , _a ) )
                self.assertEqual(out_len + 1 , len(_a ) )
                __lowerCamelCase = outputs.attentions
                self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )

    # Hidden states: embeddings + one per layer, each (seq_length, hidden_size).
    def lowerCAmelCase__ ( self: Union[str, Any] ):
        def check_hidden_states_output(UpperCamelCase_: str , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int ):
            __lowerCamelCase = model_class(_a )
            model.to(_a )
            model.eval()
            with torch.no_grad():
                __lowerCamelCase = model(**self._prepare_for_class(_a , _a ) )
            __lowerCamelCase = outputs.hidden_states
            __lowerCamelCase = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(_a ) , _a )
            __lowerCamelCase = self.model_tester.seq_length - self.model_tester.num_masks
            __lowerCamelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase = True
            check_hidden_states_output(_a , _a , _a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowerCamelCase = True
            check_hidden_states_output(_a , _a , _a )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCAmelCase__ ( self: Any ):
        pass
def prepare_video():
    '''Download the spaghetti-eating fixture video and return it as a list of frames.

    Fix: the downloaded path was bound to a throwaway name while ``np.load``
    read the undefined ``snake_case__``; the integration tests call this as
    ``prepare_video()``, fixing the real name.
    '''
    file_path = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file_path)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    """Integration tests running real VideoMAE checkpoints from the Hub.

    Fixes: the cached property and both test methods shared one mangled name
    (later definitions shadowed earlier ones, and unittest cannot discover
    non-``test_*`` methods), and devices/inputs were referenced through the
    undefined ``_a`` placeholder.
    """

    @cached_property
    def default_image_processor(self):
        # Processor matching the checkpoints, or None without vision deps.
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        """Kinetics-finetuned checkpoint: pinned classification logits."""
        model = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
            torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="""pt""" ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 4_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_for_pretraining(self):
        """Pretraining checkpoint: pinned reconstruction logits and losses."""
        model = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="""pt""" ).to(torch_device)
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""", filename="""bool_masked_pos.pt""" )
        inputs["bool_masked_pos"] = torch.load(local_path)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size([1, 14_08, 15_36])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device)
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1E-4))
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1E-4))
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""", norm_pix_loss=False).to(
            torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor(torch.tensor([0.6469]), device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1E-4))
| 12 |
"""simple docstring"""
def circle_sort(collection) -> list:
    """Sort *collection* in place using circle sort and return it.

    Circle sort repeatedly compares/swaps elements mirrored around the centre
    of each (recursively halved) segment until a full pass makes no swap.
    Fix: all locals were collapsed onto one mangled name while the body read
    the real, undefined names; ``__main__`` calls ``circle_sort``.
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high) -> bool:
        """One circle pass over collection[low..high]; True if any swap happened."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        # odd-length segment: compare the middle element with its right neighbour
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
    # Interactive driver: read comma-separated numbers and print them sorted.
    # NOTE(review): variable names look mangled (both bound to `lowerCAmelCase`);
    # the sort must be exposed as `circle_sort` for this to run.
    lowerCAmelCase : Tuple = input("""Enter numbers separated by a comma:\n""").strip()
    lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(""",""")]
    print(circle_sort(unsorted))
| 291 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map one original YOSO checkpoint key to its `transformers` equivalent.

    The replacements are order-sensitive: e.g. "norm1"/"norm2" must be handled
    before the generic "norm", and "mlm_class" before the generic "mlm".
    (Original def was mangled to `UpperCAmelCase` and read the undefined name
    `orig_key`; the body's own variable name is restored as the parameter.)
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rewrite an original YOSO state dict in place to `transformers` naming.

    Drops pooler / sentence-classification weights, renames every other key via
    `rename_key`, and adds the tied MLM bias and the position-id buffer.
    (Original def was mangled: both parameters shared one name — a SyntaxError —
    and the body's assignment targets were lost; names restored from the call
    site `convert_checkpoint_helper(config.max_position_embeddings, ...)`.)
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            # pooler / sentence-classification heads are not part of YosoForMaskedLM
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    # MLM decoder bias is shared with the predictions bias.
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    # Position ids are offset by 2 (padding-idx convention of the original model).
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load an original YOSO checkpoint, convert it, and save a HF model.

    Args:
        checkpoint_path: path to the original YOSO pytorch checkpoint.
        yoso_config_file: JSON file describing the YosoConfig.
        pytorch_dump_path: output directory for the converted model.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield successive Fibonacci numbers: 1, 2, 3, 5, 8, ...

    (Original def was mangled to `a__` with lost assignment targets, so the
    body read undefined `a`/`b`; the names are restored.)
    """
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 10_00) -> int:
    """Return the index of the first Fibonacci term containing ``n`` digits.

    Project Euler problem 25. The generator starts at F(2)=1, so the final
    ``+ 1`` converts the count of short terms into a 1-based Fibonacci index.
    """
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
    # CLI entry point: read the digit count n from stdin and print the index
    # of the first Fibonacci term with that many digits.
    print(solution(int(str(input()).strip())))
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a__ = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
a__ = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
a__ = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    """Fraction of positions where ``preds`` equals ``labels``.

    Expects array-likes whose ``==`` returns an elementwise array with a
    ``.mean()`` method (e.g. numpy arrays). (Original def was mangled: both
    parameters shared one name — a SyntaxError — while the body and the call
    sites below use `preds`/`labels` and `simple_accuracy`.)
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels, fa_avg="binary"):
    """Return accuracy and F1 score for the given predictions/labels.

    Args:
        preds: predicted labels (array-like).
        labels: gold labels (array-like).
        fa_avg: averaging mode forwarded to the F1 scorer ("binary", "macro", ...).

    (Original def was mangled — duplicate parameter names were a SyntaxError —
    and the body read undefined names; restored from the call site
    ``acc_and_fa(..., fa_avg="macro")``.)
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc(ids_preds, labels):
    """Score MultiRC: per-question exact match plus macro/answer-level F1.

    Args:
        ids_preds: list of dicts with keys "idx" (containing "paragraph" and
            "question") and "prediction".
        labels: list of gold answer labels aligned with ``ids_preds``.

    Returns a dict with "exact_match", "f1_m" (per-question macro F1) and
    "f1_a" (F1 over all answers). (Original def was mangled — duplicate
    parameter names were a SyntaxError — names restored from the body.)
    """
    # Group (prediction, label) pairs by their paragraph-question id.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        # Exact match only when every answer of the question is correct.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case(datasets.Metric):
    """SuperGLUE metric: dispatches each SuperGLUE task to the right scorer.

    Method names restored to the `datasets.Metric` interface (`_info`,
    `_compute`) — the original mangled all three methods to one shadowing
    name, while the surviving call ``self._get_feature_types()`` fixes the
    third — and the duplicate `_compute` parameter names (a SyntaxError)
    are restored to ``predictions``/``references``.
    """

    def _info(self):
        # Validate the configuration name and describe the expected features.
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        # record and multirc use structured predictions; all other tasks are
        # plain label pairs.
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        # Dispatch to the task-specific scorer.
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            # record's evaluator expects a SQuAD-style dataset structure.
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 317 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# The feature extractor below calls `logger.warning`, so the module logger must
# be bound to this name (the original assigned it to a mangled identifier).
logger = logging.get_logger(__name__)
class __magic_name__(SequenceFeatureExtractor):
    """TVLT audio feature extractor: turns raw mono audio into padded log-mel
    spectrogram patches plus an attention mask.

    Reconstruction note: the original `__init__`/`__call__` declared every
    parameter as `_a` (duplicate parameter names are a SyntaxError) and lost
    all assignment targets; parameter and attribute names are restored from
    the surviving body references (e.g. `self.spectrogram_length`,
    `self._np_extract_fbank_features`). The base class is restored to
    `SequenceFeatureExtractor`, imported above precisely for this class.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # number of frequency patches per time frame
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22_050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute a dB-scaled log-mel spectrogram rescaled into [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms.

        Returns a `BatchFeature` with "audio_values" (and "audio_mask" when
        `return_attention_mask` is True). NOTE(review): parameter names/order
        after `raw_speech` follow the upstream TVLT extractor — confirm
        against callers, since the mangled original left all of them as `_a`.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 291 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class A(unittest.TestCase):
    """Pipeline tests for video classification.

    Method names restored to the common pipeline-test interface
    (`get_test_pipeline` / `run_pipeline_test`); the mangled original gave
    every method the same name and declared duplicate parameter names
    (a SyntaxError).
    """

    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        # Example clip fetched once from the Hub dataset repo.
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset'
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            # top_k=2 -> exactly two {score, label} dicts per video
            self.assertEqual(
                outputs,
                [
                    {'score': ANY(float), 'label': ANY(str)},
                    {'score': ANY(float), 'label': ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 1_0}, crop_size={'height': 1_0, 'width': 1_0}
        )
        video_classifier = pipeline(
            'video-classification', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')

        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{'score': 0.5_199, 'label': 'LABEL_0'}, {'score': 0.4_801, 'label': 'LABEL_1'}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{'score': 0.5_199, 'label': 'LABEL_0'}, {'score': 0.4_801, 'label': 'LABEL_1'}],
                [{'score': 0.5_199, 'label': 'LABEL_0'}, {'score': 0.4_801, 'label': 'LABEL_1'}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        # TensorFlow video classification is not implemented yet.
        pass
| 7 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate a model-parallel device map against the model's block count.

    Raises ValueError when a block is assigned to more than one device, when a
    block is missing from the map, or when the map names blocks the model does
    not have. (Original def was mangled: both parameters shared one name — a
    SyntaxError; restored from the body's `device_map` reference and upstream
    naming.)
    """
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )
def get_device_map(n_layers, devices):
    """Evenly split ``n_layers`` layer indices across ``devices``.

    Returns a dict mapping each device to a contiguous chunk of layer indices
    (the last device may get fewer layers). (Original def was mangled with
    duplicate parameter names — a SyntaxError; `n_layers` survives in the
    body.)
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
| 291 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class __lowerCAmelCase(DeiTImageProcessor):
    """Deprecated alias of `DeiTImageProcessor` kept for backward compatibility.

    The base class is restored to `DeiTImageProcessor` (imported above for this
    purpose); the original `__init__` used one name for both `*args` and
    `**kwargs` (a SyntaxError) and passed an undefined `_a` where the warning
    category `FutureWarning` belongs.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DeiTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 324 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds a tiny RoFormer config plus random inputs for the Flax tests.

    Class name restored from the surviving call site
    ``FlaxRoFormerModelTester(self)`` below; the mangled original declared
    every `__init__` parameter as `_a` (a SyntaxError) and lost all
    `self.<attr>` assignment targets, which are recovered from the method
    bodies that read them.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests for RoFormer.

    Mixin base restored to `FlaxModelTesterMixin` (imported above for this
    class); the two mangled methods sharing one name are restored to
    `setUp` / `test_model_from_pretrained` per the unittest interface.
    """

    # NOTE(review): the original mangled this class attribute's name; it is
    # assumed to be the mixin's `test_head_masking` flag — confirm upstream.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
self.assertIsNotNone(_a )
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published Chinese RoFormer weights."""

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        # Reference logits recorded from the original implementation.
        expected_slice = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 291 | 0 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """Recursively insertion-sort ``collection[:n]`` in place.

    (Both defs were mangled to the same name `__a` while their bodies call
    `insert_next` and `rec_insertion_sort`; the real names are restored from
    those surviving call sites.)
    """
    # Empty / single-element prefixes are already sorted.
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    """Bubble ``collection[index - 1]`` rightwards until it is in order."""
    # Stop at the end of the list or once the pair is already ordered.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    # Demo driver: read whitespace-separated integers and print them sorted.
    # The original assigned to mangled names but then read `numbers` and
    # `number_list`, which were undefined (NameError); the targets are restored.
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 158 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> list:
    """Return the most likely hidden-state sequence for the observations.

    Classic Viterbi dynamic programming over a discrete HMM. (The mangled
    original declared five parameters with the same name — a SyntaxError —
    and lost every assignment target; names are restored from the helper
    call sites, which survived intact.)

    >>> observations = ["normal", "cold", "dizzy"]
    >>> states = ["Healthy", "Fever"]
    >>> start_p = {"Healthy": 0.6, "Fever": 0.4}
    >>> trans_p = {
    ...     "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    ...     "Fever": {"Healthy": 0.4, "Fever": 0.6},
    ... }
    >>> emit_p = {
    ...     "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    ...     "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    ... }
    >>> viterbi(observations, states, start_p, trans_p, emit_p)
    ['Healthy', 'Healthy', 'Fever']
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Validate all Viterbi inputs; raises ValueError on the first problem."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Raise ValueError if any argument is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space, states_space) -> None:
    """Both spaces must be lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object, var_name: str) -> None:
    """Raise ValueError unless ``_object`` is a list of strings."""
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Initial probabilities are str->float; the other two are nested dicts."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object, var_name: str) -> None:
    """Raise ValueError unless ``_object`` is a dict of str->dict[str, float]."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object, var_name: str, value_type, nested: bool = False) -> None:
    """Raise ValueError unless ``_object`` is a dict with str keys and
    ``value_type`` values; ``nested`` only adjusts the error message."""
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
    # Run the doctest examples embedded in this module.
    from doctest import testmod
    testmod()
| 291 | 0 |
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# (The original bound the path to a mangled name while the next line read
# `git_repo_path`, which was undefined; the target is restored.)
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register the shared transformers CLI options.

    The function must carry the exact hook name `pytest_addoption` for pytest
    to discover it; the original def name was mangled, which silently disabled
    the hook.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the extra reports when `--make-reports` is set.

    Must be named `pytest_terminal_summary` exactly for pytest to call it;
    the original def name was mangled, which silently disabled the hook.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 209 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Dict = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq OPT checkpoint and massage its state dict.

    - drops keys HF OPT does not use,
    - renames project_in/project_out and final layer norm keys,
    - splits fused ``qkv_proj`` weights into separate q/k/v projections.

    Fixes vs. original: the checkpoint was loaded from disk twice; the three
    renamed q/k/v keys and the split tensors were all assigned to a single
    local, so the split results were discarded. Renamed from ``a__`` so the
    internal call sites and the __main__ guard resolve.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        # reuse the already-loaded dict instead of re-reading the file
        sd = sd["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            q, k, v = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert an OPT checkpoint at *checkpoint_path* and save an HF model.

    Args:
        checkpoint_path: path to the fairseq/metaseq checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        config: optional path/name of an ``OPTConfig`` to load; defaults to
            a fresh ``OPTConfig()``.
    """
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 291 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus the wrapped script's argv.

    Renamed from ``A`` (the second ``def A`` below shadowed this one, so the
    internal ``parse_args()`` call raised NameError).
    """
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def A() -> None:
    """Entry point: import the training script as a module and spawn it on TPUs."""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the wrapped script sees its own arguments
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    A()  # fix: guard previously called the undefined name `main`
| 222 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __magic_name__(unittest.TestCase):
    """Tests for VisionTextDualEncoderProcessor (BERT tokenizer + ViT image processor).

    Fixes vs. original: every method was named ``_lowerCAmelCase`` (each def
    overwrote the previous one, and unittest never discovered any of them),
    and attribute assignments had lost their ``self.`` prefix. Method names
    are restored from the bodies; ``np.uinta`` is corrected to ``np.uint8``.
    """

    def setUp(self):
        """Create a temp dir holding a toy BERT vocab and a ViT image-processor config."""
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Tokenizer backed by the temp-dir vocab."""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """Image processor backed by the temp-dir config."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (moved to HWC uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 291 | 0 |
"""simple docstring"""
def lowerCAmelCase(number):
    """Return the two's complement of a non-positive integer as a binary string.

    >>> lowerCAmelCase(0)
    '0b0'
    >>> lowerCAmelCase(-1)
    '0b11'
    >>> lowerCAmelCase(-5)
    '0b1011'

    Raises:
        ValueError: if *number* is positive.

    Fix: the body referenced the undefined names ``number`` and
    ``snake_case__`` while the parameter was named ``__UpperCamelCase``.
    """
    if number > 0:
        raise ValueError('input must be a negative integer')
    # Minimal bit-length of |number| (bin(negative) looks like '-0b...').
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '1'
            + '0' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '0'
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 266 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus the wrapped script's argv.

    Renamed from ``a__`` (the second ``def a__`` below shadowed this one, so
    the internal ``parse_args()`` call raised NameError).
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def a__() -> None:
    """Entry point: import the training script as a module and spawn it on TPUs."""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the wrapped script sees its own arguments
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    a__()  # fix: guard previously called the undefined name `main`
| 291 | 0 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline  # noqa F401
# Deprecation shim: importing this module only warns users to switch to the
# maintained pipeline import path. NOTE(review): the imported class name looks
# garbled ("ImgaImg") — presumably StableDiffusionImg2ImgPipeline; confirm
# against the installed diffusers version.
warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
| 339 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : List[str] = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __magic_name__(PretrainedConfig):
    """Configuration class for a SEW-D model.

    Fixes vs. original: the ``__init__`` signature declared dozens of
    parameters all named ``_a`` (a SyntaxError) and every body assignment had
    lost its ``self.`` prefix, so no attribute was ever set; the base class
    was the undefined name ``UpperCAmelCase__`` although ``PretrainedConfig``
    is imported at the top of the file. Parameter names are restored from the
    attribute assignments in the body.
    """

    # was the mangled attribute `__UpperCamelCase`; PretrainedConfig expects `model_type`
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)'
                f'= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def _lowerCAmelCase(self):
        """Product of all conv strides: input samples per encoder output frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 291 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a FocalNetConfig for the given checkpoint name.

    Fixes vs. original: all three functions in this script were named ``_a``
    (shadowing each other) and every local was assigned to the single name
    ``lowercase__``, destroying the logic; names are restored from the call
    sites and keyword arguments.
    """
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 1_28
    elif "large" in model_name:
        embed_dim = 1_92
    elif "xlarge" in model_name:
        embed_dim = 2_56
    elif "huge" in model_name:
        embed_dim = 3_52

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    """Map an original FocalNet state-dict key to its HF equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original FocalNet checkpoint, convert it to HF format and verify it."""
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 2_56},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=2_24,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(2_56),
            transforms.CenterCrop(2_24),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1E-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2_166, -0.4_368, 0.2_191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1_669, 0.0_125, -0.1_695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4_917, -0.0_430, 0.1_341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2_588, -0.5_342, -0.2_331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1_655, -0.4_090, -0.1_730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5_306, -0.0_483, -0.3_928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'Saving model and processor of {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'Pushing model and processor of {model_name} to the hub...')
        model.push_to_hub(f'{model_name}')
        processor.push_to_hub(f'{model_name}')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 110 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase : Any = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowerCAmelCase : Any = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
lowerCAmelCase : Any = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__(datasets.Metric):
    """Recall metric backed by ``sklearn.metrics.recall_score``.

    Fixes vs. original: both methods were named ``_lowerCAmelCase`` (the
    second overwrote the first, and ``datasets.Metric`` requires ``_info`` /
    ``_compute``), and the compute signature declared several parameters all
    named ``_a`` (a SyntaxError). Names are restored from the metric's
    documented keyword arguments.
    """

    def _info(self):
        """Describe inputs/outputs; multilabel config takes sequences of ints."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        """Delegate to sklearn; y_true (references) comes first per sklearn's API."""
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        # average=None yields a per-class array; a scalar is unwrapped to float
        return {"recall": float(score) if score.size == 1 else score}
| 291 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCamelCase__():
    """Parse the fine-tuning script's command-line arguments.

    Fix: every ``type=``/several ``default=`` values were the undefined name
    ``snake_case__``; concrete types are restored from each option's default.
    Note ``type=bool`` for --freeze mirrors the upstream script (any non-empty
    string parses as True — a known argparse quirk, kept for compatibility).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5E-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
# Global accuracy metric (from `evaluate`), loaded once at import time and
# used by the compute-metrics function below.
UpperCAmelCase_ = load('accuracy')
def lowerCamelCase__(A__):
    """Compute accuracy from a Trainer ``(logits, labels)`` eval tuple.

    Args:
        A__: pair of (logits array, reference labels) as passed by Trainer.

    Fix: the tuple was unpacked into a single repeated name and the undefined
    ``snake_case__`` was passed around; proper locals are restored.
    NOTE(review): ``metric`` is the module-level accuracy metric loaded above.
    """
    predictions, labels = A__
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class lowerCamelCase__(UpperCAmelCase__):
    """Trainer callback that also evaluates on the training set at eval time.

    Fixes vs. original: ``__init__`` assigned the trainer to a local instead
    of ``self._trainer`` (later reads raised AttributeError); the hook
    declared three positional parameters all named ``UpperCamelCase_``
    (a SyntaxError) and returned the never-assigned ``control_copy``.
    Parameter names follow the TrainerCallback hook convention
    (args, state, control).
    """

    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def lowerCAmelCase__(self, args, state, control, **kwargs):
        """When an evaluation is about to run, also evaluate on the train set."""
        if control.should_evaluate:
            # copy first: the nested evaluate() call mutates `control`
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def lowerCamelCase__ ( ):
    """Fine-tune a sequence classifier on codeparrot/codecomplex (7 complexity classes)."""
    args = get_args()
    set_seed(args.seed )
    # 60/20/20 train/test/valid split of the single "train" split shipped upstream.
    dataset = load_dataset("codeparrot/codecomplex", split="train" )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test["test"].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        } )
    print("Loading tokenizer and model" )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        # Freeze the encoder so only the classification head trains.
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )

    def tokenize(example ):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024 )
        label = labels.str2int(example["complexity"] )  # fixed: was the typo `labels.straint`
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )

    def _compute_metrics(eval_pred ):
        # Local accuracy computation so this entry point is self-contained;
        # uses the module-level metric loaded above.
        predictions, references = eval_pred
        predictions = np.argmax(predictions, axis=1 )
        return UpperCAmelCase_.compute(predictions=predictions, references=references )

    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=_compute_metrics, )
    print("Training..." )
    # NOTE(review): CustomCallback is expected to be the evaluation callback
    # defined earlier in this file — confirm the binding.
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()


main = lowerCamelCase__  # entry-point alias used by the __main__ guard


if __name__ == "__main__":
    main()
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __magic_name__ ( UpperCAmelCase__ ):
    """Map-style dataset that lazily applies a preprocessing function to each item."""

    def __init__( self , dataset , process , params ):
        self.dataset = dataset  # underlying map-style dataset
        self.process = process  # per-item preprocessing callable
        self.params = params    # extra kwargs forwarded to `process`

    def __len__( self ):
        return len(self.dataset )

    def __getitem__( self , i ):
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class __magic_name__ ( UpperCAmelCase__ ):
    """Iterable dataset that applies `infer` to every item of `loader`,
    optionally unrolling batched outputs back into single items when
    `loader_batch_size` is set."""

    def __init__( self , loader , infer , params , loader_batch_size=None ):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping for the batch currently being unrolled.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__( self ):
        return len(self.loader )

    def __iter__( self ):
        self.iterator = iter(self.loader )
        return self

    def loader_batch_item( self ):
        """Return the current element of the unrolled batch as a batch of size 1."""
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result

    def __next__( self ):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class __magic_name__ ( UpperCAmelCase__ ):
    """Iterator that flattens the sub-iterators produced by a chunk pipeline
    into one continuous stream of items."""

    def __init__( self , loader , infer , params , loader_batch_size=None ):
        super().__init__(loader , infer , params )

    def __iter__( self ):
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self

    def __next__( self ):
        if self.subiterator is None:
            # Start unrolling the first item.
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
class __magic_name__ ( UpperCAmelCase__ ):
    """Iterator that regroups flattened chunk-pipeline items back into the
    original `process` boundaries by accumulating until an `is_last` marker."""

    def __iter__( self ):
        self.iterator = iter(self.loader )
        return self

    def __next__( self ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last" )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last" )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last" )
                accumulator.append(item )
        return accumulator
class __magic_name__ ( UpperCAmelCase__ ):
    """Dataset view that yields a single field of each underlying item."""

    def __init__( self , dataset , key ):
        self.dataset = dataset  # underlying map-style dataset of dicts
        self.key = key          # field to extract from each item

    def __len__( self ):
        return len(self.dataset )

    def __getitem__( self , i ):
        return self.dataset[i][self.key]
class __magic_name__ ( UpperCAmelCase__ ):
    """Dataset view that yields a text/text_pair dict built from two fields
    of each underlying item (for sentence-pair pipelines)."""

    def __init__( self , dataset , key1 , key2 ):
        self.dataset = dataset
        self.key1 = key1  # field used for "text"
        self.key2 = key2  # field used for "text_pair"

    def __len__( self ):
        return len(self.dataset )

    def __getitem__( self , i ):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
def UpperCAmelCase ( lowercase ):
    """Sort a list in ascending order using circle sort and return it.

    Circle sort recursively compares and swaps elements from opposite ends of
    (sub)ranges until a full pass performs no swap.
    """
    collection = lowercase
    if len(collection ) < 2:
        return collection

    def circle_sort_util(coll , low , high ) -> bool:
        """One circle pass over coll[low..high]; return True if any swap happened."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if coll[left] > coll[right]:
                coll[left], coll[right] = coll[right], coll[left]
                swapped = True
            left += 1
            right -= 1
        # Odd-length range: compare the middle element with its right neighbour.
        if left == right and coll[left] > coll[right + 1]:
            coll[left], coll[right + 1] = coll[right + 1], coll[left]
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(coll , low , mid )
        right_swap = circle_sort_util(coll , mid + 1 , high )
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    # fixed: `circle_sort` was never defined in this module; the sorter above is.
    print(UpperCAmelCase(unsorted))
"""simple docstring"""
def a__ ( snake_case__ ) -> bool:
lowerCamelCase = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def a__ ( snake_case__ = 50_00 ) -> int:
    """Project Euler 44: find pentagonal numbers Pj, Pk whose sum and difference
    are both pentagonal; return the (minimal found) difference, or -1 if no such
    pair exists among the first `snake_case__ - 1` pentagonal numbers.
    """

    def _is_pentagonal(num ) -> bool:
        # Local pentagonality test so this function does not depend on the
        # (shadowed) module-level helper.
        root = (1 + 24 * num) ** 0.5
        return ((1 + root) / 6) % 1 == 0

    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , snake_case__ )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        # fixed: the inner loop must start at `i`, not at the limit itself.
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if _is_pentagonal(a ) and _is_pentagonal(b ):
                return b
    return -1
if __name__ == "__main__":
    # fixed: `solution` was never defined in this module; `a__` is the solver above.
    print(F"""{a__() = }""")
def lowercase ( x , y ) -> Tuple:
    """Return ``(length, subsequence)`` of the longest common subsequence of x and y.

    Classic O(m*n) dynamic programming followed by a backtracking pass to
    recover one witness subsequence.
    """
    assert x is not None
    assert y is not None
    m = len(x )
    n = len(y )
    # l[i][j] = LCS length of x[:i] and y[:j]
    l = [[0] * (n + 1 ) for _ in range(m + 1 )]  # noqa: E741
    for i in range(1 , m + 1 ):
        for j in range(1 , n + 1 ):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
    # Backtrack through the table to build one LCS string.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    # fixed: `longest_common_subsequence` was never defined here; `lowercase` is the
    # implementation above.
    ln, subseq = lowercase(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
def a__ ( pt_model , model_file ) -> Tuple:
    """Deserialize a Flax checkpoint file and load its weights into `pt_model`.

    Args:
        pt_model: target PyTorch model.
        model_file: path to the serialized Flax state file.
    Raises:
        OSError: when the file is a git-lfs pointer rather than real weights.
        EnvironmentError: when the file cannot be deserialized at all.
    """
    try:
        with open(model_file , """rb""" ) as flax_state_f:
            # NOTE(review): first argument reconstructed as None (template-free
            # deserialization) — confirm against the Flax serialization API.
            flax_state = from_bytes(None , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith("""version""" ):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned.""" )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model ( pt_model , flax_state ) -> Tuple:
    """Copy weights from a deserialized Flax state tree into `pt_model`.

    Handles bf16->fp32 casting, kernel transposition (conv HWIO -> OIHW, linear
    transpose), `scale` -> `weight` renaming, and `_N` -> `.N` key remapping.
    Renamed from an obfuscated placeholder: the loader above already calls it
    by this name.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    # check if we have bf16 weights
    is_type_bfaa = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bfaa ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    pt_model.base_model_prefix = ""
    flax_state_dict = flatten_dict(flax_state , sep="." )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split("." )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv kernel: Flax HWIO -> PyTorch OIHW
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            # linear kernel: transpose
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("""_0""" , """.0""" )
                    .replace("""_1""" , """.1""" )
                    .replace("""_2""" , """.2""" )
                    .replace("""_3""" , """.3""" )
                    .replace("""_4""" , """.4""" )
                    .replace("""_5""" , """.5""" )
                    .replace("""_6""" , """.6""" )
                    .replace("""_7""" , """.7""" )
                    .replace("""_8""" , """.8""" )
                    .replace("""_9""" , """.9""" )
                )
        flax_key = """.""".join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    if len(missing_keys ) > 0:
        logger.warning(
            F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            """ use it for predictions and inference.""" )
    return pt_model
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
# Emit conversion progress at INFO level and create this module's logger.
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def get_yolos_config( yolos_name ) -> YolosConfig:
    """Build the YolosConfig matching a named original checkpoint.

    Renamed from an obfuscated placeholder: the converter below already calls
    it as `get_yolos_config`.
    """
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    # COCO detection labels are shared by all variants.
    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v( state_dict , config , base_model=False ) -> int:
    """Split each fused timm qkv projection into separate HF query/key/value entries.

    Mutates `state_dict` in place: pops ``blocks.{i}.attn.qkv.*`` and writes
    ``vit.encoder.layer.{i}.attention.attention.{query,key,value}.*``.
    """
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'vit.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def rename_key( name ) -> str:
    """Map one original (timm-style) YOLOS state-dict key to its HF equivalent.

    Renamed from an obfuscated placeholder so sibling code can call it by name.
    """
    if "backbone" in name:
        name = name.replace('backbone' , 'vit' )
    if "cls_token" in name:
        name = name.replace('cls_token' , 'embeddings.cls_token' )
    if "det_token" in name:
        name = name.replace('det_token' , 'embeddings.detection_tokens' )
    if "mid_pos_embed" in name:
        name = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'embeddings.position_embeddings' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'encoder.layer' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "class_embed" in name:
        name = name.replace('class_embed' , 'class_labels_classifier' )
    if "bbox_embed" in name:
        name = name.replace('bbox_embed' , 'bbox_predictor' )
    if "vit.norm" in name:
        name = name.replace('vit.norm' , 'vit.layernorm' )
    return name
def convert_state_dict( orig_state_dict , model ) -> dict:
    """Rewrite an original YOLOS state dict into HF naming/layout.

    Fused qkv matrices are split per-layer (sizes taken from `model`);
    every other key is remapped via `rename_key`.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            # NOTE(review): non-qkv keys are remapped through `rename_key`, the
            # helper defined above — confirm it is in scope at runtime.
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img():
    """Download and return the standard COCO cats test image (PIL Image)."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    """Convert an original YOLOS checkpoint to HF format, verify a forward pass
    against hard-coded expected slices, save locally, optionally push to the hub.

    Args:
        yolos_name: one of the known variant names (e.g. "yolos_ti").
        checkpoint_path: path to the original .pth file.
        pytorch_dump_folder_path: output directory for the converted model.
        push_to_hub: upload to hustvl/* when True.
    Raises:
        ValueError: unknown `yolos_name`.
    """
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format='coco_detection' , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f'Unknown yolos_name: {yolos_name}' )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            'yolos_ti': 'yolos-tiny',
            'yolos_s_200_pre': 'yolos-small',
            'yolos_s_300_pre': 'yolos-small-300',
            'yolos_s_dWr': 'yolos-small-dwr',
            'yolos_base': 'yolos-base',
        }
        print('Pushing to the hub...' )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='hustvl' )
        model.push_to_hub(model_name , organization='hustvl' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # The name must still be bound: the fast tokenizer class below references it
    # as its slow counterpart.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

# Real constant names restored: the tokenizer class below references them directly.
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}

SPIECE_UNDERLINE = """▁"""

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class __magic_name__ ( UpperCAmelCase__ ):
    """Fast (tokenizers-backed) XLNet tokenizer.

    Method/attribute names restored to the conventions the fast-tokenizer base
    class dispatches on; the original obfuscated names collided and shadowed
    each other.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """XLNet layout: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        """Segment ids: 0 for A (+sep), 1 for B (+sep), 2 for the trailing cls."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id

    def save_vocabulary( self , save_directory , filename_prefix=None ):
        """Copy the sentencepiece model into `save_directory`; requires a slow vocab file."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
'''simple docstring'''
from typing import Any
def a__ ( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> list:
    """Viterbi algorithm: return the most likely hidden-state sequence for the
    given observations under an HMM.

    Args:
        observations_space: list of observation symbols, in order.
        states_space: list of hidden-state names.
        initial_probabilities: state -> P(state at t=0).
        transition_probabilities: state -> state -> transition probability.
        emission_probabilities: state -> observation -> emission probability.
    """
    _validation(
        observations_space, observations_space and states_space, initial_probabilities, transition_probabilities, emission_probabilities, ) if False else _validation(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1, -1, -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


# Readable alias for the entry point.
viterbi = a__
def _validation( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """Validate all Viterbi inputs; raise ValueError on the first problem.

    Renamed from an obfuscated placeholder: the algorithm above calls it as
    `_validation`.
    """
    _validate_not_empty(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    _validate_lists(observations_space, states_space )
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities )
def a__ ( observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    """Raise ValueError when any of the Viterbi inputs is empty/falsy.

    Raises:
        ValueError: if at least one argument is empty (falsy).
    """
    # The obfuscated original duplicated the parameter name (SyntaxError);
    # restored distinct names. The error message is kept byte-for-byte.
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('''There\'s an empty parameter''' )
def a__ ( observations_space: Any, states_space: Any ) -> None:
    """Validate that both spaces are lists of strings.

    Raises:
        ValueError: propagated from ``_validate_list``.
    """
    # Restored distinct parameter names (the obfuscated original reused one
    # name for both parameters, a SyntaxError).
    _validate_list(observations_space, '''observations_space''' )
    _validate_list(states_space, '''states_space''' )
def a__ ( _object: Any, var_name: str ) -> None:
    """Check that ``_object`` is a list whose elements are all strings.

    Args:
        _object: the value to validate.
        var_name: name used in the error message.

    Raises:
        ValueError: when ``_object`` is not a list or any element is not a str.
    """
    # Restored the (_object, var_name) contract: the obfuscated original
    # duplicated parameter names and built messages from undefined names.
    if not isinstance(_object, list ):
        msg = f"""{var_name} must be a list"""
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x, str ):
                msg = f"""{var_name} must be a list of strings"""
                raise ValueError(msg )
def a__ ( initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    """Validate the three probability tables of the Viterbi algorithm.

    Raises:
        ValueError: propagated from the dict validators.
    """
    # initial probabilities map state -> float; the other two are nested
    # dicts validated by `_validate_nested_dict`.
    _validate_dict(initial_probabilities, '''initial_probabilities''', float )
    _validate_nested_dict(transition_probabilities, '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities, '''emission_probabilities''' )
def a__ ( _object: Any, var_name: str ) -> None:
    """Validate a dict-of-dicts whose inner values must all be floats.

    Raises:
        ValueError: propagated from ``_validate_dict``.
    """
    # Outer mapping: str -> dict; inner mappings: str -> float (nested=True
    # adjusts the error message wording).
    _validate_dict(_object, var_name, dict )
    for x in _object.values():
        _validate_dict(x, var_name, float, True )
def a__ ( _object: Any, var_name: str, value_type: type, nested: bool = False ) -> None:
    """Check that ``_object`` is a dict with str keys and ``value_type`` values.

    Args:
        _object: the value to validate.
        var_name: name used in the error messages.
        value_type: required type of every dict value.
        nested: when True, error messages mention "nested dictionary".

    Raises:
        ValueError: on any violation of the above contract.
    """
    # Restored the (_object, var_name, value_type, nested) contract from the
    # body's references; the obfuscated signature duplicated one name.
    if not isinstance(_object, dict ):
        msg = f"""{var_name} must be a dict"""
        raise ValueError(msg )
    if not all(isinstance(x, str ) for x in _object ):
        msg = f"""{var_name} all keys must be strings"""
        raise ValueError(msg )
    if not all(isinstance(x, value_type ) for x in _object.values() ):
        nested_text = '''nested dictionary ''' if nested else ''''''
        msg = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 324 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __magic_name__ ( unittest.TestCase ):
    '''Pipeline tests for TextaTextGenerationPipeline (text2text-generation task).'''

    # Candidate architectures the common pipeline harness draws from
    # (PyTorch and TensorFlow mappings respectively).
    __UpperCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    __UpperCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def _lowerCAmelCase ( self , _a , _a , _a ):
        """Build a text2text pipeline plus sample prompts for the harness."""
        # NOTE(review): the obfuscation dropped the assignment target here;
        # `generator` below is undefined as written — confirm against upstream.
        lowerCamelCase = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
        return generator, ["Something to write", "Something else"]
    def _lowerCAmelCase ( self , _a , _a ):
        """Check output shapes for single, batched and invalid inputs."""
        lowerCamelCase = generator("""Something there""" )
        self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
        lowerCamelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
        self.assertEqual(
            _a , [
                [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
                [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
            ] , )
        lowerCamelCase = generator(
            ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
        self.assertEqual(
            _a , [
                [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
                [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
            ] , )
        # Non-string input must raise.
        with self.assertRaises(_a ):
            generator(4 )
    @require_torch
    def _lowerCAmelCase ( self ):
        """Deterministic PyTorch smoke test on a tiny random T5 checkpoint."""
        lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
        # do_sample=False necessary for reproducibility
        lowerCamelCase = generator("""Something there""" , do_sample=_a )
        self.assertEqual(_a , [{"""generated_text""": """"""}] )
        lowerCamelCase = 3
        lowerCamelCase = generator(
            """Something there""" , num_return_sequences=_a , num_beams=_a , )
        lowerCamelCase = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(_a , _a )
        lowerCamelCase = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
        self.assertEqual(
            _a , [
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ] , )
        lowerCamelCase = generator.model.config.eos_token_id
        lowerCamelCase = """<pad>"""
        lowerCamelCase = generator(
            ["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
        self.assertEqual(
            _a , [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def _lowerCAmelCase ( self ):
        """Deterministic TensorFlow smoke test on the same tiny checkpoint."""
        lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
        # do_sample=False necessary for reproducibility
        lowerCamelCase = generator("""Something there""" , do_sample=_a )
        self.assertEqual(_a , [{"""generated_text""": """"""}] )
| 291 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    # Tests for VisionTextDualEncoderProcessor: a BERT tokenizer paired with
    # a ViT image processor, saved to and reloaded from a temp directory.
    def _snake_case ( self ) -> Optional[Any]:
        # Write a toy BERT vocab file and a ViT image-processor JSON config
        # into a fresh temp directory.
        _lowerCAmelCase = tempfile.mkdtemp()
        # fmt: off
        _lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        _lowerCAmelCase = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        _lowerCAmelCase = os.path.join(self.tmpdirname , _a )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(_a , _a )
    def _snake_case ( self , **_lowerCAmelCase ) -> int:
        # Tokenizer factory reading from the temp dir.
        return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
    def _snake_case ( self , **_lowerCAmelCase ) -> Optional[Any]:
        # Image-processor factory reading from the temp dir.
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
    def _snake_case ( self ) -> Any:
        # Remove the temp directory.
        shutil.rmtree(self.tmpdirname )
    def _snake_case ( self ) -> Tuple:
        # Build one random PIL image (channels moved to the last axis).
        _lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        _lowerCAmelCase = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _snake_case ( self ) -> Optional[Any]:
        # save_pretrained / from_pretrained round-trip with default components.
        _lowerCAmelCase = self.get_tokenizer()
        _lowerCAmelCase = self.get_image_processor()
        _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
        processor.save_pretrained(self.tmpdirname )
        _lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , _a )
    def _snake_case ( self ) -> Dict:
        # Round-trip with extra kwargs overriding special tokens / normalization.
        _lowerCAmelCase = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        _lowerCAmelCase = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
        _lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_a , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _a )
    def _snake_case ( self ) -> str:
        # processor(images=...) must match the bare image processor output.
        _lowerCAmelCase = self.get_image_processor()
        _lowerCAmelCase = self.get_tokenizer()
        _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
        _lowerCAmelCase = self.prepare_image_inputs()
        _lowerCAmelCase = image_processor(_a , return_tensors="np" )
        _lowerCAmelCase = processor(images=_a , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def _snake_case ( self ) -> Any:
        # processor(text=...) must match the bare tokenizer output.
        _lowerCAmelCase = self.get_image_processor()
        _lowerCAmelCase = self.get_tokenizer()
        _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
        _lowerCAmelCase = "lower newer"
        _lowerCAmelCase = processor(text=_a )
        _lowerCAmelCase = tokenizer(_a )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _snake_case ( self ) -> List[str]:
        # Joint text+image call returns all expected keys; empty call raises.
        _lowerCAmelCase = self.get_image_processor()
        _lowerCAmelCase = self.get_tokenizer()
        _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
        _lowerCAmelCase = "lower newer"
        _lowerCAmelCase = self.prepare_image_inputs()
        _lowerCAmelCase = processor(text=_a , images=_a )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with self.assertRaises(_a ):
            processor()
    def _snake_case ( self ) -> Dict:
        # batch_decode must defer to the tokenizer.
        _lowerCAmelCase = self.get_image_processor()
        _lowerCAmelCase = self.get_tokenizer()
        _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
        _lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _lowerCAmelCase = processor.batch_decode(_a )
        _lowerCAmelCase = tokenizer.batch_decode(_a )
        self.assertListEqual(_a , _a )
    def _snake_case ( self ) -> Any:
        # Output keys must match processor.model_input_names.
        _lowerCAmelCase = self.get_image_processor()
        _lowerCAmelCase = self.get_tokenizer()
        _lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
        _lowerCAmelCase = "lower newer"
        _lowerCAmelCase = self.prepare_image_inputs()
        _lowerCAmelCase = processor(text=_a , images=_a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 158 |
"""simple docstring"""
def a__ ( input_str: str, use_pascal: bool = False ) -> str:
    """Convert a snake_case string to camelCase (or PascalCase).

    Args:
        input_str: the snake_case string to convert.
        use_pascal: when True, capitalize the first word too (PascalCase).

    Raises:
        ValueError: when arguments have the wrong type.

    >>> a__("some_random_string")
    'someRandomString'
    >>> a__("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    # The obfuscated original reused one parameter name twice (SyntaxError)
    # and type-checked parameters against themselves; restored the intended
    # (input_str, use_pascal) contract implied by the body and messages.
    if not isinstance(input_str, str ):
        msg = F'Expected string as input, found {type(input_str )}'
        raise ValueError(msg )
    if not isinstance(use_pascal, bool ):
        msg = F'Expected boolean as use_pascal parameter, found {type(use_pascal )}'
        raise ValueError(msg )
    words = input_str.split("""_""" )
    # In camelCase the first word stays untouched; in PascalCase every word
    # gets capitalized.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = """""" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 291 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __A ( unittest.TestCase ):
    '''Builds tiny RoFormer configs and random inputs for the Flax tests.'''
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=4 , ):
        '''Record the hyper-parameters of the tiny test model.'''
        # NOTE(review): the obfuscation duplicated the parameter name and
        # dropped the assignment targets; names on the right-hand side below
        # reflect the intended attributes.
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = seq_length
        lowerCamelCase__ = is_training
        lowerCamelCase__ = use_attention_mask
        lowerCamelCase__ = use_token_type_ids
        lowerCamelCase__ = use_labels
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = max_position_embeddings
        lowerCamelCase__ = type_vocab_size
        lowerCamelCase__ = type_sequence_label_size
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = num_choices
    def __lowerCamelCase ( self ):
        '''Create random ids/masks and the matching RoFormerConfig.'''
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ = None
        if self.use_attention_mask:
            lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ = None
        if self.use_token_type_ids:
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def __lowerCamelCase ( self ):
        '''Repackage the prepared inputs as the common-test dict format.'''
        lowerCamelCase__ = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
        lowerCamelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class __A ( UpperCAmelCase__ , unittest.TestCase ):
    '''Common Flax model tests instantiated for the RoFormer family.'''
    lowerCAmelCase_ = True
    # Every RoFormer head tested by the shared Flax suite.
    lowerCAmelCase_ = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def __lowerCamelCase ( self ):
        '''Create the model tester used by the shared Flax test suite.'''
        lowerCamelCase__ = FlaxRoFormerModelTester(self )
    @slow
    def __lowerCamelCase ( self ):
        '''Load each pretrained class from the hub and run a 1x1 forward.'''
        for model_class_name in self.all_model_classes:
            lowerCamelCase__ = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=_a )
            lowerCamelCase__ = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_a )
@require_flax
class __A ( unittest.TestCase ):
    '''Integration test for the pretrained Chinese RoFormer masked-LM head.'''
    @slow
    def __lowerCamelCase ( self ):
        '''Check output shape and a 3x3 logits slice against recorded values.'''
        lowerCamelCase__ = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        lowerCamelCase__ = jnp.array([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase__ = model(_a )[0]
        lowerCamelCase__ = 5_0_0_0_0
        lowerCamelCase__ = (1, 6, vocab_size)
        self.assertEqual(output.shape , _a )
        # Reference slice recorded from a known-good run of the checkpoint.
        lowerCamelCase__ = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
| 209 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
# Module-level logger for this image processor.
lowerCAmelCase : int = logging.get_logger(__name__)
class __magic_name__ ( UpperCAmelCase__ ):
    '''Image processor: shortest-edge resize, center crop, rescale, normalize,
    plus post-processing of semantic-segmentation logits.'''
    __UpperCamelCase = ["pixel_values"]
    def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ):
        """Store the default preprocessing configuration (256 shortest edge,
        224x224 crop, 1/255 rescale, ImageNet mean/std)."""
        super().__init__(**_a )
        lowerCamelCase = size if size is not None else {"""shortest_edge""": 256}
        lowerCamelCase = get_size_dict(_a , default_to_square=_a )
        lowerCamelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" )
        lowerCamelCase = do_resize
        lowerCamelCase = size
        lowerCamelCase = resample
        lowerCamelCase = do_center_crop
        lowerCamelCase = crop_size
        lowerCamelCase = do_rescale
        lowerCamelCase = rescale_factor
        lowerCamelCase = do_normalize
        lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def _lowerCAmelCase ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ):
        """Resize so the shortest edge matches size["shortest_edge"]."""
        lowerCamelCase = get_size_dict(_a , default_to_square=_a )
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        lowerCamelCase = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a )
        return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
    def _lowerCAmelCase ( self , _a , _a , _a = None , **_a , ):
        """Center-crop to size["height"] x size["width"]."""
        lowerCamelCase = get_size_dict(_a )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
        return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a )
    def _lowerCAmelCase ( self , _a , _a , _a = None , **_a ):
        """Multiply pixel values by the given scale factor."""
        return rescale(_a , scale=_a , data_format=_a , **_a )
    def _lowerCAmelCase ( self , _a , _a , _a , _a = None , **_a , ):
        """Normalize with the given per-channel mean and std."""
        return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
    def _lowerCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ):
        """Run the full pipeline (resize -> crop -> rescale -> normalize) over a
        batch of images and return a BatchFeature of pixel_values."""
        # Per-call arguments override the stored defaults.
        lowerCamelCase = do_resize if do_resize is not None else self.do_resize
        lowerCamelCase = size if size is not None else self.size
        lowerCamelCase = get_size_dict(_a , default_to_square=_a )
        lowerCamelCase = resample if resample is not None else self.resample
        lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowerCamelCase = crop_size if crop_size is not None else self.crop_size
        lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" )
        lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
        lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
        lowerCamelCase = image_mean if image_mean is not None else self.image_mean
        lowerCamelCase = image_std if image_std is not None else self.image_std
        lowerCamelCase = make_list_of_images(_a )
        if not valid_images(_a ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # Each enabled step needs its corresponding parameter.
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        lowerCamelCase = [to_numpy_array(_a ) for image in images]
        if do_resize:
            lowerCamelCase = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
        if do_center_crop:
            lowerCamelCase = [self.center_crop(image=_a , size=_a ) for image in images]
        if do_rescale:
            lowerCamelCase = [self.rescale(image=_a , scale=_a ) for image in images]
        if do_normalize:
            lowerCamelCase = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
        lowerCamelCase = [to_channel_dimension_format(_a , _a ) for image in images]
        lowerCamelCase = {"""pixel_values""": images}
        return BatchFeature(data=_a , tensor_type=_a )
    def _lowerCAmelCase ( self , _a , _a = None ):
        """Turn segmentation logits into per-image label maps, optionally
        bilinearly resized to the given target sizes."""
        lowerCamelCase = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(_a ) != len(_a ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(_a ):
                lowerCamelCase = target_sizes.numpy()
            lowerCamelCase = []
            for idx in range(len(_a ) ):
                lowerCamelCase = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_a )
                lowerCamelCase = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(_a )
        else:
            # Without target sizes, argmax over the class dimension directly.
            lowerCamelCase = logits.argmax(dim=1 )
            lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 291 | 0 |
def A ( a: str, b: str ) -> bool:
    """Return True if string ``a`` can be turned into abbreviation ``b``.

    Allowed moves: capitalize some lowercase letters of ``a`` and delete all
    the remaining lowercase letters; uppercase letters of ``a`` must match
    ``b`` exactly and in order.

    >>> A("daBcd", "ABC")
    True
    >>> A("dBcd", "ABC")
    False
    """
    # dp[i][j] is True when the first i characters of `a` can produce the
    # first j characters of `b`. The obfuscated original duplicated the
    # parameter names (SyntaxError) and lost the `dp[...]` assignment
    # targets; restored the standard transitions below.
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                # Capitalize a[i] to consume b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i] (only lowercase letters may be deleted).
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 222 |
"""simple docstring"""
import operator as op
lowerCAmelCase : Dict = """scaler.pt"""
lowerCAmelCase : Tuple = """pytorch_model"""
lowerCAmelCase : Union[str, Any] = """random_states"""
lowerCAmelCase : Union[str, Any] = """optimizer"""
lowerCAmelCase : Dict = """scheduler"""
lowerCAmelCase : int = """pytorch_model.bin"""
lowerCAmelCase : str = """pytorch_model.bin.index.json"""
lowerCAmelCase : Union[str, Any] = """model.safetensors"""
lowerCAmelCase : List[Any] = """model.safetensors.index.json"""
lowerCAmelCase : List[Any] = """1.10.2"""
lowerCAmelCase : Any = """py38"""
lowerCAmelCase : Optional[int] = """4.17.0"""
lowerCAmelCase : str = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
lowerCAmelCase : Tuple = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
lowerCAmelCase : List[Any] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
lowerCAmelCase : List[str] = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
lowerCAmelCase : List[str] = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
lowerCAmelCase : Any = """2.0.1"""
lowerCAmelCase : List[Any] = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
lowerCAmelCase : Union[str, Any] = ["""default""", """reduce-overhead""", """max-autotune"""]
lowerCAmelCase : Optional[int] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCAmelCase : Union[str, Any] = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
lowerCAmelCase : List[str] = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
lowerCAmelCase : Optional[Any] = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 291 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( UpperCAmelCase__ , unittest.TestCase ):
    '''Tokenizer tests for XLM (toy BPE vocab + merges written to disk).'''
    A_ : Optional[int] = XLMTokenizer
    A_ : List[str] = False
    def _SCREAMING_SNAKE_CASE ( self : int ):
        '''Write a toy BPE vocab (JSON) and merges file into the temp dir.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __A = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        __A = dict(zip(_a, range(len(_a ) ) ) )
        __A = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        __A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        __A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file, '''w''' ) as fp:
            fp.write(json.dumps(_a ) )
        with open(self.merges_file, '''w''' ) as fp:
            fp.write('''\n'''.join(_a ) )
    def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ):
        '''Return an (input, expected output) text pair for the common tests.'''
        __A = '''lower newer'''
        __A = '''lower newer'''
        return input_text, output_text
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        '''Tokenize "lower" and check tokens and ids against the toy vocab.'''
        __A = XLMTokenizer(self.vocab_file, self.merges_file )
        __A = '''lower'''
        __A = ['''low''', '''er</w>''']
        __A = tokenizer.tokenize(_a )
        self.assertListEqual(_a, _a )
        __A = tokens + ['''<unk>''']
        __A = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ), _a )
    @slow
    def _SCREAMING_SNAKE_CASE ( self : int ):
        '''Check special-token layout using the pretrained xlm-mlm-en-2048.'''
        __A = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
        __A = tokenizer.encode('''sequence builders''', add_special_tokens=_a )
        __A = tokenizer.encode('''multi-sequence build''', add_special_tokens=_a )
        __A = tokenizer.build_inputs_with_special_tokens(_a )
        __A = tokenizer.build_inputs_with_special_tokens(_a, _a )
        # Single sequence: <s> ... </s>; pair: <s> ... </s> ... </s>.
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 266 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
    '''Builds tiny ViT-MSN configs/inputs and runs the per-model checks.'''
    def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ):
        """Store the tiny-model hyper-parameters."""
        lowerCamelCase = parent
        lowerCamelCase = batch_size
        lowerCamelCase = image_size
        lowerCamelCase = patch_size
        lowerCamelCase = num_channels
        lowerCamelCase = is_training
        lowerCamelCase = use_labels
        lowerCamelCase = hidden_size
        lowerCamelCase = num_hidden_layers
        lowerCamelCase = num_attention_heads
        lowerCamelCase = intermediate_size
        lowerCamelCase = hidden_act
        lowerCamelCase = hidden_dropout_prob
        lowerCamelCase = attention_probs_dropout_prob
        lowerCamelCase = type_sequence_label_size
        lowerCamelCase = initializer_range
        lowerCamelCase = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase = (image_size // patch_size) ** 2
        lowerCamelCase = num_patches + 1
    def _lowerCAmelCase ( self ):
        """Create random pixel values (and optional labels) plus a config."""
        lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase = None
        if self.use_labels:
            lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase = self.get_config()
        return config, pixel_values, labels
    def _lowerCAmelCase ( self ):
        """Build a ViTMSNConfig from the stored hyper-parameters."""
        return ViTMSNConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def _lowerCAmelCase ( self , _a , _a , _a ):
        """Forward the base model and check the hidden-state shape."""
        lowerCamelCase = ViTMSNModel(config=_a )
        model.to(_a )
        model.eval()
        lowerCamelCase = model(_a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _lowerCAmelCase ( self , _a , _a , _a ):
        """Forward the classification head, including a greyscale variant."""
        lowerCamelCase = self.type_sequence_label_size
        lowerCamelCase = ViTMSNForImageClassification(_a )
        model.to(_a )
        model.eval()
        lowerCamelCase = model(_a , labels=_a )
        # NOTE(review): these prints use plain strings, not f-strings, so the
        # {placeholders} are emitted literally.
        print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
        print("""Labels: {labels}""" )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        lowerCamelCase = 1
        lowerCamelCase = ViTMSNForImageClassification(_a )
        model.to(_a )
        model.eval()
        lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase = model(_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def _lowerCAmelCase ( self ):
        """Repackage prepared inputs into the common-test dict format."""
        lowerCamelCase = self.prepare_config_and_inputs()
        lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
        lowerCamelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    '''Common model/config tests instantiated for ViT-MSN.'''
    __UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    __UpperCamelCase = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the shared test mixins.
    __UpperCamelCase = False
    __UpperCamelCase = False
    __UpperCamelCase = False
    __UpperCamelCase = False
    def _lowerCAmelCase ( self ):
        """Set up the model tester and config tester."""
        lowerCamelCase = ViTMSNModelTester(self )
        lowerCamelCase = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
    def _lowerCAmelCase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
    def _lowerCAmelCase ( self ):
        """Skipped: this architecture has no inputs_embeds path."""
        pass
    def _lowerCAmelCase ( self ):
        """Input embeddings must be a module; output embeddings None or Linear."""
        lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase = model_class(_a )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
    def _lowerCAmelCase ( self ):
        """The forward signature must start with pixel_values."""
        lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase = model_class(_a )
            lowerCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase = [*signature.parameters.keys()]
            lowerCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _a )
    def _lowerCAmelCase ( self ):
        """Exercise the base-model check from the tester."""
        lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )
    def _lowerCAmelCase ( self ):
        """Exercise the image-classification check from the tester."""
        lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_a )
    @slow
    def _lowerCAmelCase ( self ):
        """Load the first pretrained checkpoint end-to-end."""
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase = ViTMSNModel.from_pretrained(_a )
            self.assertIsNotNone(_a )
def a__() -> Any:
    """Load the standard COCO cats test fixture image.

    The original discarded the ``Image.open`` result and returned the
    undefined name ``image``; the binding is restored.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class __magic_name__(unittest.TestCase):
    """Slow integration test for the pretrained facebook/vit-msn-small checkpoint.

    NOTE(review): the obfuscated original dropped every assignment target and
    named both methods ``_lowerCAmelCase``; names are restored — the property
    name ``default_image_processor`` is proven by its use inside the test body.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0_803, -0.4_454, -0.2_375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 291 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
def A(key) -> str:
    """Rewrite ``word.N`` segments of a PyTorch key as ``word_N`` (Flax style).

    E.g. ``"layers.0.weight"`` -> ``"layers_0.weight"``. The original body
    referenced the undefined name ``snake_case__`` for both the pattern and
    the key; the bindings are restored.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        # "layers.0" -> "layers_0"
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def A(pt_tuple_key, pt_tensor, random_flax_state_dict) -> Tuple:
    """Rename a PyTorch tuple-key to Flax conventions, reshaping the tensor when needed.

    Handles, in order: layer-norm scale, embedding tables, 4-D conv kernels
    (HWIO transpose), linear kernels (transpose), and legacy gamma/beta
    layer-norm names. The original discarded every renamed key into a
    throwaway variable; bindings are restored.

    Returns:
        (renamed_key_tuple, possibly-reshaped tensor)
    """
    # layer norm: a "...bias" under a norm module maps onto Flax "scale"
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: OIHW -> HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose to (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def A(pt_state_dict, flax_model, init_key=42) -> Union[str, Any]:
    """Convert a PyTorch state dict into a (nested) Flax parameter dict.

    Steps: detach to numpy, init random Flax params to learn the expected
    key/shape layout, then rename+reshape each PyTorch weight to match.
    The original discarded every intermediate into a throwaway name;
    bindings are restored.

    NOTE(review): this calls ``rename_key`` and ``rename_key_and_reshape_tensor``
    exactly as the original did, but both helpers in this file were obfuscated
    to the name ``A`` — the module needs its helper names restored as well.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 339 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def a__(params, i, prefix) -> List[Any]:
    """Return the relative position bias embedding for layer ``i``.

    The original repeated ``snake_case__`` as every parameter name, which is a
    SyntaxError in Python (duplicate argument); real names are restored.
    """
    return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def a__(params, i, prefix, layer_name="attention") -> List[Any]:
    """Extract and flatten the K/O/Q/V kernels of attention layer ``i``.

    K, Q, V are reshaped (d, h, dh) -> (d, h*dh); O is reshaped
    (h, dh, d) -> (h*dh, d). Restores the parameter and local names destroyed
    by obfuscation (duplicate ``snake_case__`` parameters were a SyntaxError).
    """
    k_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def a__(params, i, prefix, split_mlp_wi=False) -> List[str]:
    """Return the MLP (wi, wo) kernels of layer ``i``.

    For gated-GeLU checkpoints (``split_mlp_wi``) ``wi`` is the pair
    ``(wi_0, wi_1)``. Restores the names destroyed by obfuscation
    (duplicate ``snake_case__`` parameters were a SyntaxError).
    """
    if split_mlp_wi:
        wi_0 = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_1 = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def a__(params, i, prefix, layer_name) -> Tuple:
    """Return the layer-norm scale vector of layer ``i`` for ``layer_name``.

    Restores parameter names (the obfuscated duplicates were a SyntaxError).
    """
    return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def a__(variables, *, num_layers, is_encoder_only, scalable_attention=False) -> Dict:
    """Convert a T5X parameter tree into a flat PyTorch-named OrderedDict.

    The obfuscated original repeated the keyword-only parameter name (a
    SyntaxError) and discarded every ``new[...]`` assignment target. The
    PyTorch key names below are restored from the canonical T5/UMT5 state-dict
    layout (``encoder.block.{i}.layer.{j}...``) — verify against the upstream
    conversion script before relying on them.
    """
    old = traverse_util.flatten_dict(variables["""target"""])
    old = {"""/""".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = """encoder/encoder/mlp/wi_0/kernel""" in old
    print("""Split MLP:""", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["""token_embedder/embedding"""]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, """encoder""", """pre_attention_layer_norm""")
        k, o, q, v = tax_attention_lookup(old, i, """encoder""", """attention""")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, """encoder""", """pre_mlp_layer_norm""")
        wi, wo = tax_mlp_lookup(old, i, """encoder""", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, """encoder""").T

    new["encoder.final_layer_norm.weight"] = old["""encoder/encoder_norm/scale"""]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, """encoder""").T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, """decoder""").T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, """decoder""", """pre_self_attention_layer_norm""")
            k, o, q, v = tax_attention_lookup(old, i, """decoder""", """self_attention""")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, """decoder""", """pre_cross_attention_layer_norm""")
            k, o, q, v = tax_attention_lookup(old, i, """decoder""", """encoder_decoder_attention""")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, """decoder""", """pre_mlp_layer_norm""")
            wi, wo = tax_mlp_lookup(old, i, """decoder""", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(old, i, """decoder""").T

        new["decoder.final_layer_norm.weight"] = old["""decoder/decoder_norm/scale"""]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["""decoder/logits_dense/kernel"""].T

    return new
def a__(converted_params, is_encoder_only) -> Optional[int]:
    """Build a torch state dict from converted numpy params, tying embeddings.

    Ties ``shared.weight`` into encoder/decoder embed_tokens (and lm_head for
    v1.0 checkpoints) when those keys are missing. Restores the parameter
    names and assignment targets destroyed by obfuscation (the original
    repeated ``snake_case__``, a SyntaxError).
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["""shared.weight"""]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["""shared.weight"""]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("""Using shared word embeddings as lm_head.""")
            state_dict["lm_head.weight"] = state_dict["""shared.weight"""]

    return state_dict
def a__(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention) -> List[Any]:
    """Load a T5X checkpoint's weights into a PyTorch model in place.

    Restores the parameter names (the obfuscated duplicates were a
    SyntaxError) and the assignment targets the later calls consume.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    # strict=True so any missing/unexpected key surfaces immediately
    model.load_state_dict(state_dict, strict=True)
def a__(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False) -> str:
    """End-to-end conversion: T5X checkpoint -> saved PyTorch model directory.

    Restores the parameter names and bindings destroyed by obfuscation
    (duplicate ``snake_case__`` parameters were a SyntaxError).
    """
    config = MTaConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("""Done""")
if __name__ == "__main__":
    # CLI entry point for the T5X -> PyTorch conversion.
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
    )
    parser.add_argument(
        """--scalable_attention""",
        action="""store_true""",
        help="""Whether the model uses scaled attention (umt5 model)""",
        default=False,
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        # fixed: argparse stores --t5x_checkpoint_path as `t5x_checkpoint_path`;
        # the original read the nonexistent `args.tax_checkpoint_path`.
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 291 | 0 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _a(
    coefficient_matrix: NDArray[floataa],
    constant_matrix: NDArray[floataa],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve Ax = b approximately via the Jacobi iteration method.

    Args:
        coefficient_matrix: n x n strictly diagonally dominant matrix A.
        constant_matrix: n x 1 column vector b.
        init_val: n initial guesses for the solution.
        iterations: number of Jacobi sweeps to run (must be >= 1).

    Returns:
        The approximate solution as a list of floats.

    Raises:
        ValueError: on any dimension mismatch, non-positive iteration count,
            or a coefficient matrix that is not strictly diagonally dominant.

    The obfuscated original collapsed every assignment target into a
    throwaway name; the bindings the later code reads are restored here.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f'Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'
        raise ValueError(msg)

    if cols2 != 1:
        msg = f'Constant matrix must be nx1 but received {rows2}x{cols2}'
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            f'received {rows1}x{cols1} and {rows2}x{cols2}'
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            f'matrix but received {len(init_val)} and {rows1}'
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''')

    # Augmented matrix [A | b]
    table: NDArray[floataa] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]        # diagonal element
                elif col == cols - 1:
                    val = table[row][col]          # b component
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = table.shape
lowercase__ = True
for i in range(0 , snake_case__ ):
lowercase__ = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
# Run the module's doctests when executed directly; a no-op on import.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 110 |
"""simple docstring"""
from __future__ import annotations
def a__(a_list, item) -> bool:
    """Return True when ``item`` occurs in the sorted list ``a_list``.

    The obfuscated original repeated ``snake_case__`` as both parameter
    names (a SyntaxError) and recursed via the undefined name
    ``binary_search``; this version restores the names and performs the
    same halving search iteratively over index bounds, which also avoids
    the O(n) slice copies of the recursive form.
    """
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False
if __name__ == "__main__":
    # Interactive driver: read a sorted comma-separated list and a target value.
    # NOTE(review): the assignments below all target `lowerCAmelCase`, yet
    # `user_input`, `sequence`, `target` and `not_str` are read afterwards —
    # the obfuscation destroyed the variable names; restore before running.
    lowerCAmelCase : List[Any] = input("""Enter numbers separated by comma:\n""").strip()
    lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(""",""")]
    lowerCAmelCase : Optional[int] = int(input("""Enter the number to be found in the list:\n""").strip())
    lowerCAmelCase : Union[str, Any] = """""" if binary_search(sequence, target) else """not """
    print(F"""{target} was {not_str}found in {sequence}""")
| 291 | 0 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCAmelCase_ = logging.get_logger(__name__)
# Map ONNX tensor element-type strings to the numpy dtypes used when feeding
# inputs to an InferenceSession. The obfuscated original referenced
# nonexistent numpy attributes (np.inta, np.intaa, np.floataa, ...), which
# made the module unimportable; the real dtypes are restored below.
UpperCAmelCase_ = {
    """tensor(bool)""": np.bool_,
    """tensor(int8)""": np.int8,
    """tensor(uint8)""": np.uint8,
    """tensor(int16)""": np.int16,
    """tensor(uint16)""": np.uint16,
    """tensor(int32)""": np.int32,
    """tensor(uint32)""": np.uint32,
    """tensor(int64)""": np.int64,
    """tensor(uint64)""": np.uint64,
    """tensor(float16)""": np.float16,
    """tensor(float)""": np.float32,
    """tensor(double)""": np.float64,
}
class lowerCamelCase__:
    """Thin wrapper around an ONNX Runtime InferenceSession with hub save/load helpers.

    NOTE(review): the obfuscated original discarded every ``self.`` assignment
    and named all five helpers identically (so they shadowed each other).
    Method names are restored from the internal references the code itself
    makes (``self._save_pretrained``, ``cls._from_pretrained``, ``load_model``).
    """

    def __init__(self, model=None, **kwargs):
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""")
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""", None)
        self.latest_model_name = kwargs.get("""latest_model_name""", None)

    def __call__(self, **kwargs):
        # InferenceSession expects numpy arrays; first run argument is the
        # output-name list (None => all outputs).
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Create an ort.InferenceSession for ``path`` on the given provider."""
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""")
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        """Copy the current model file (and external weights, if any) into ``save_directory``."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        """Save the model to a directory, creating it if necessary."""
        if os.path.isfile(save_directory):
            logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        """Load from a local directory or download the model file from the hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = cls.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["""model_save_dir"""] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["""model_save_dir"""] = Path(model_cache_path).parent
            kwargs["""latest_model_name"""] = Path(model_cache_path).name
            model = cls.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        """Public entry point; supports the ``repo@revision`` id syntax."""
        revision = None
        if len(str(model_id).split("""@""")) == 2:
            model_id, revision = model_id.split("""@""")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
| 12 |
"""simple docstring"""
def a__(collection) -> list:
    """Sort ``collection`` in place using circle sort and return it.

    Repeatedly compares and swaps mirrored pairs across the list, recursing
    on both halves, until a full pass makes no swap. The obfuscated original
    named its parameter ``snake_case__`` while the body read ``collection``,
    and discarded every assignment target; the bindings are restored.
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high) -> bool:
        # Returns True when any swap happened in this (sub)range.
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        # swap mirrored out-of-order pairs moving inward
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        # odd-length middle pair
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
    # Interactive driver: read a comma-separated list and print it sorted.
    # NOTE(review): the assignments target `lowerCAmelCase` but the code reads
    # `user_input` / `unsorted`, and calls `circle_sort` which this module
    # defines under an obfuscated name — restore the names before running.
    lowerCAmelCase : Tuple = input("""Enter numbers separated by a comma:\n""").strip()
    lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(""",""")]
    print(circle_sort(unsorted))
| 291 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : Optional[int] = logging.get_logger(__name__)
__a : int = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class _UpperCamelCase(UpperCAmelCase__):
    """Configuration class for the Autoformer time-series model.

    NOTE(review): the obfuscated original declared every ``__init__``
    parameter as ``lowerCAmelCase__`` (duplicate arguments — a SyntaxError)
    and discarded each ``self.`` assignment into a throwaway name. Parameter
    names are restored from the HF Autoformer/TimeSeriesTransformer config
    layout; verify ordering against upstream before relying on keyword order.
    """

    # class attributes expected by PretrainedConfig
    model_type = '''autoformer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],
        scaling=True,
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        activation_function="gelu",
        dropout=0.1,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=1_00,
        init_std=0.02,
        use_cache=True,
        is_encoder_decoder=True,
        label_length=10,
        moving_average=25,
        autocorrelation_factor=3,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Total width of the per-timestep feature vector fed to the model."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
"""simple docstring"""
from collections.abc import Generator
def a__() -> Generator[int, None, None]:
    """Yield Fibonacci numbers forever, starting 1, 2, 3, 5, 8, ...

    The obfuscated original assigned the state pair to a throwaway name but
    read ``a`` and ``b``; the bindings are restored.
    """
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def a__(n=10_00) -> int:
    """Project Euler 25: index of the first Fibonacci term with ``n`` digits.

    Restores the parameter name (the body read ``n`` while the parameter was
    obfuscated) and the generator binding.

    NOTE(review): this calls ``fibonacci_generator``, which this module
    defines under an obfuscated name — the helper's name must be restored
    for the call to resolve.
    """
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    # +1 because the generator starts at the second Fibonacci term
    return answer + 1
if __name__ == "__main__":
    # Driver: read N and print the index of the first N-digit Fibonacci term.
    print(solution(int(str(input()).strip())))
| 291 | 0 |
from typing import Dict
from .base import GenericTensor, Pipeline
class snake_case ( UpperCAmelCase__ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[int] , lowerCAmelCase : Any=None , lowerCAmelCase : str=None , lowerCAmelCase : Tuple=None , **lowerCAmelCase : Optional[int]) -> Tuple:
"""simple docstring"""
if tokenize_kwargs is None:
_snake_case : Optional[Any] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"""truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""")
_snake_case : List[str] = truncation
_snake_case : Optional[Any] = tokenize_kwargs
_snake_case : Union[str, Any] = {}
if return_tensors is not None:
_snake_case : Any = return_tensors
return preprocess_params, {}, postprocess_params
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
_snake_case : Union[str, Any] = self.framework
_snake_case : Optional[int] = self.tokenizer(_a , return_tensors=_a , **_a)
return model_inputs
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : List[str]) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[Any] = self.model(**_a)
return model_outputs
def UpperCamelCase_ ( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Any=False) -> str:
"""simple docstring"""
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Tuple , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Union[str, Any]) -> List[str]:
"""simple docstring"""
return super().__call__(*_a , **_a)
| 317 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
class __magic_name__(UpperCAmelCase__):
    """TVLT audio feature extractor: log-mel spectrogram values plus a patch mask.

    NOTE(review): the obfuscated original discarded every ``self.``
    assignment in ``__init__`` and every intermediate in ``__call__``; the
    bindings are restored from the attributes the later code reads
    (``self.freq_len``, ``self.hop_length``, ``self.mel_filters``, ...).
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22_050.0,
            sampling_rate=sampling_rate,
            norm="""slaney""",
            mel_scale="""slaney""",
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute the dB log-mel spectrogram of a 1-D waveform, scaled to [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, """hann"""),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="""dB""",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into padded spectrogram patches."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            # assumes features are padded along the time axis only — TODO confirm
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            data = {"""audio_values""": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 291 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = """▁"""
lowercase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowercase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
lowercase_ = {
"""facebook/m2m100_418M""": 1024,
}
# fmt: off
lowercase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class A ( UpperCAmelCase__ ):
    """Sentencepiece-backed M2M100-style tokenizer (JSON vocab + spm model,
    plus per-language ``__xx__`` tokens appended after the base vocabulary).

    NOTE(review): identifiers here (`A`, `snake_case__`, `_a`, the repeated
    `lowercase_` parameters) were mangled by an automatic renamer — several
    signatures have duplicated parameter names and bodies reference names the
    signatures no longer bind, so the class will not import as-is. The
    comments below describe the intent visible in the bodies; confirm against
    the upstream source before relying on them.
    """
    lowerCamelCase = VOCAB_FILES_NAMES
    lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase = ['input_ids', 'attention_mask']
    lowerCamelCase = []
    lowerCamelCase = []
    def __init__( self : Any,lowercase_ : Union[str, Any],lowercase_ : Any,lowercase_ : Tuple=None,lowercase_ : List[Any]=None,lowercase_ : str="<s>",lowercase_ : Tuple="</s>",lowercase_ : str="</s>",lowercase_ : Any="<pad>",lowercase_ : Union[str, Any]="<unk>",lowercase_ : int="m2m100",lowercase_ : Optional[Any] = None,lowercase_ : Dict=8,**lowercase_ : int,)-> Optional[int]:
        """Build the tokenizer from a vocab JSON file and a sentencepiece model."""
        # sentencepiece kwargs default to an empty dict when not supplied
        A__ = {} if sp_model_kwargs is None else sp_model_kwargs
        A__ = language_codes
        A__ = FAIRSEQ_LANGUAGE_CODES[language_codes]
        # map each language code to its "__xx__" token form
        A__ = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
        A__ = kwargs.get('additional_special_tokens',[] )
        # register every language token as an additional special token (no duplicates)
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(_a )
            for lang_code in fairseq_language_code
            if self.get_lang_token(_a ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=_a,tgt_lang=_a,bos_token=_a,eos_token=_a,sep_token=_a,unk_token=_a,pad_token=_a,language_codes=_a,sp_model_kwargs=self.sp_model_kwargs,num_madeup_words=_a,**_a,)
        A__ = vocab_file
        A__ = load_json(_a )
        # decoder is the inverse id->token mapping of the JSON vocab
        A__ = {v: k for k, v in self.encoder.items()}
        A__ = spm_file
        A__ = load_spm(_a,self.sp_model_kwargs )
        A__ = len(self.encoder )
        # language tokens occupy ids directly after the regular vocab
        A__ = {
            self.get_lang_token(_a ): self.encoder_size + i for i, lang_code in enumerate(_a )
        }
        A__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_a )}
        A__ = {v: k for k, v in self.lang_token_to_id.items()}
        A__ = src_lang if src_lang is not None else 'en'
        A__ = tgt_lang
        A__ = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        A__ = num_madeup_words
    @property
    def snake_case__ ( self : str )-> Union[str, Any]:
        """Vocabulary size: base encoder entries plus language tokens."""
        return len(self.encoder ) + len(self.lang_token_to_id )
    @property
    def snake_case__ ( self : Optional[int] )-> str:
        """Current source-language code."""
        return self._src_lang
    @src_lang.setter
    def snake_case__ ( self : Tuple,lowercase_ : Optional[int] )-> List[Any]:
        """Set the source language and refresh the special-token prefix/suffix."""
        A__ = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def snake_case__ ( self : int,lowercase_ : Tuple )-> List[Any]:
        """Tokenize text with the sentencepiece model."""
        return self.sp_model.encode(_a,out_type=_a )
    def snake_case__ ( self : Any,lowercase_ : Optional[int] )-> Tuple:
        """Token -> id; language tokens take precedence, unknown tokens map to unk."""
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(_a,self.encoder[self.unk_token] )
    def snake_case__ ( self : List[Any],lowercase_ : Union[str, Any] )-> Dict:
        """Id -> token; ids in the language-token range decode to language tokens."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(_a,self.unk_token )
    def snake_case__ ( self : Optional[int],lowercase_ : Dict )-> Optional[int]:
        """Join tokens back into text, decoding sub-token runs with sentencepiece."""
        A__ = []
        A__ = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(_a ) + token
                A__ = []
            else:
                current_sub_tokens.append(_a )
        out_string += self.sp_model.decode(_a )
        return out_string.strip()
    def snake_case__ ( self : List[str],lowercase_ : str,lowercase_ : Any = None,lowercase_ : str = False )-> List[str]:
        """Return a mask with 1 at special-token (prefix/suffix) positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a,token_ids_a=_a,already_has_special_tokens=_a )
        A__ = [1] * len(self.prefix_tokens )
        A__ = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_a )) + suffix_ones
        return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
    def snake_case__ ( self : Any,lowercase_ : Union[str, Any],lowercase_ : Optional[Any] = None )-> Tuple:
        """Wrap one (or two) sequences with the language-token prefix and eos suffix."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def snake_case__ ( self : str )-> Union[str, Any]:
        """Full token->id vocabulary, including added tokens."""
        A__ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : Any )-> List[str]:
        """Drop the unpicklable sentencepiece processor before pickling."""
        A__ = self.__dict__.copy()
        A__ = None
        return state
    def __setstate__( self : int,lowercase_ : int )-> Union[str, Any]:
        """Restore state and reload the sentencepiece model from disk."""
        A__ = d
        # for backward compatibility
        if not hasattr(self,'sp_model_kwargs' ):
            A__ = {}
        A__ = load_spm(self.spm_file,self.sp_model_kwargs )
    def snake_case__ ( self : Optional[int],lowercase_ : List[Any],lowercase_ : Union[str, Any] = None )-> Any:
        """Save the vocab JSON and the sentencepiece model into the target directory."""
        A__ = Path(_a )
        if not save_dir.is_dir():
            raise OSError(F'{save_directory} should be a directory' )
        A__ = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        A__ = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder,_a )
        # copy the spm file when it exists elsewhere; otherwise serialize from memory
        if os.path.abspath(self.spm_file ) != os.path.abspath(_a ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file,_a )
        elif not os.path.isfile(self.spm_file ):
            with open(_a,'wb' ) as fi:
                A__ = self.sp_model.serialized_model_proto()
                fi.write(_a )
        return (str(_a ), str(_a ))
    def snake_case__ ( self : Any,lowercase_ : Union[str, Any],lowercase_ : str = "en",lowercase_ : Union[str, Any] = None,lowercase_ : Optional[Any] = "ro",**lowercase_ : List[str],)-> Tuple:
        """Prepare a seq2seq batch after fixing the src/tgt languages."""
        A__ = src_lang
        A__ = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seqaseq_batch(_a,_a,**_a )
    def snake_case__ ( self : int,lowercase_ : int,lowercase_ : Optional[int],lowercase_ : int,**lowercase_ : Tuple )-> int:
        """Build translation inputs; both `src_lang` and `tgt_lang` are required."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        A__ = src_lang
        A__ = self(_a,add_special_tokens=_a,**_a )
        A__ = self.get_lang_id(_a )
        A__ = tgt_lang_id
        return inputs
    def snake_case__ ( self : int )-> Optional[int]:
        """Switch special tokens to input (source-language) mode."""
        self.set_src_lang_special_tokens(self.src_lang )
    def snake_case__ ( self : List[Any] )-> int:
        """Switch special tokens to target-language mode."""
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def snake_case__ ( self : List[Any],lowercase_ : Union[str, Any] )-> Optional[Any]:
        """Prefix = [src-language token id], suffix = [eos]."""
        A__ = self.get_lang_token(_a )
        A__ = self.lang_token_to_id[lang_token]
        A__ = [self.cur_lang_id]
        A__ = [self.eos_token_id]
    def snake_case__ ( self : Tuple,lowercase_ : str )-> Union[str, Any]:
        """Prefix = [tgt-language token id], suffix = [eos]."""
        A__ = self.get_lang_token(_a )
        A__ = self.lang_token_to_id[lang_token]
        A__ = [self.cur_lang_id]
        A__ = [self.eos_token_id]
    def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any] )-> Optional[Any]:
        """Language code -> "__xx__" token string."""
        return self.lang_code_to_token[lang]
    def snake_case__ ( self : str,lowercase_ : List[Any] )-> Union[str, Any]:
        """Language code -> token id."""
        A__ = self.get_lang_token(_a )
        return self.lang_token_to_id[lang_token]
def _snake_case( path , sp_model_kwargs ) -> "sentencepiece.SentencePieceProcessor":
    """Load a SentencePiece model.

    Fixes the machine-mangled original, whose signature repeated the same
    parameter name (a SyntaxError) and whose body referenced unbound names.

    Args:
        path: filesystem path of the serialized ``.model`` file.
        sp_model_kwargs: keyword arguments forwarded to ``SentencePieceProcessor``.

    Returns:
        A loaded ``sentencepiece.SentencePieceProcessor`` instance.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    # SentencePiece expects a string path, so coerce Path objects
    spm.Load(str(path ) )
    return spm
def _snake_case( SCREAMING_SNAKE_CASE__ : Dict ) -> Union[Dict, List]:
'''simple docstring'''
with open(snake_case__ , 'r' ) as f:
return json.load(snake_case__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> None:
'''simple docstring'''
with open(snake_case__ , 'w' ) as f:
json.dump(snake_case__ , snake_case__ , indent=2 )
| 7 |
"""simple docstring"""
from math import ceil
def a__ ( device_map , num_blocks ) -> None:
    """Validate that ``device_map`` assigns each attention block exactly once.

    Fixes the mangled original, whose signature repeated one parameter name
    (a SyntaxError), referenced `device_map` as a free variable, and tested
    the wrong variable in every length check.

    Args:
        device_map: mapping of device -> list of attention-block indices.
        num_blocks: total number of attention blocks in the model; the map
            must cover exactly ``0 .. num_blocks - 1``.

    Raises:
        ValueError: if any block is duplicated, missing, or out of range.
    """
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in device_map.values() for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks ) )
def a__ ( n_layers , devices ) -> dict:
    """Evenly partition layer indices ``0 .. n_layers - 1`` across ``devices``.

    Fixes the mangled original, whose signature repeated one parameter name
    (a SyntaxError) and referenced `n_layers` as a free variable.

    Args:
        n_layers: number of layers to distribute.
        devices: sequence of device identifiers.

    Returns:
        dict mapping each device to a contiguous chunk of layer indices
        (chunk size = ceil(n_layers / len(devices))).
    """
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
| 291 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : Tuple = {"""vocab_file""": """spiece.model"""}
lowercase__ : int = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
lowercase__ : Tuple = {
"""google/bigbird-roberta-base""": 40_96,
"""google/bigbird-roberta-large""": 40_96,
"""google/bigbird-base-trivia-itc""": 40_96,
}
class __lowerCAmelCase ( UpperCAmelCase__ ):
    """Sentencepiece-backed BigBird-style tokenizer.

    NOTE(review): identifiers (`_a`, the repeated `lowerCAmelCase__`
    parameters, and the merged `token_ids_a` pair names) were mangled by an
    automatic renamer, so several signatures are invalid and bodies reference
    unbound names; the class will not import as-is. Comments describe the
    intent visible in the bodies — confirm against the upstream source.
    """
    _snake_case : Dict = VOCAB_FILES_NAMES
    _snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    _snake_case : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _snake_case : Optional[Any] = ['input_ids', 'attention_mask']
    _snake_case : Dict = []
    def __init__( self : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str]="<unk>" , lowerCAmelCase__ : List[str]="<s>" , lowerCAmelCase__ : Union[str, Any]="</s>" , lowerCAmelCase__ : int="<pad>" , lowerCAmelCase__ : Optional[Any]="[SEP]" , lowerCAmelCase__ : Optional[int]="[MASK]" , lowerCAmelCase__ : List[str]="[CLS]" , lowerCAmelCase__ : Optional[Any] = None , **lowerCAmelCase__ : Tuple , ) -> Tuple:
        """Wrap special-token strings as AddedToken and load the sentencepiece model."""
        _UpperCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
        _UpperCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
        _UpperCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
        _UpperCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
        _UpperCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
        _UpperCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        _UpperCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
        _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sep_token=_a , mask_token=_a , cls_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
        _UpperCamelCase = vocab_file
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_a )
    @property
    def snake_case__ ( self : List[Any] ) -> Optional[Any]:
        """Vocabulary size as reported by the sentencepiece model."""
        return self.sp_model.get_piece_size()
    def snake_case__ ( self : Optional[Any] ) -> List[str]:
        """Full token->id vocabulary, including added tokens."""
        _UpperCamelCase = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : List[Any] ) -> Union[str, Any]:
        """Drop the unpicklable sentencepiece processor before pickling."""
        _UpperCamelCase = self.__dict__.copy()
        _UpperCamelCase = None
        return state
    def __setstate__( self : int , lowerCAmelCase__ : Tuple ) -> Optional[int]:
        """Restore state and reload the sentencepiece model from ``self.vocab_file``."""
        _UpperCamelCase = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            _UpperCamelCase = {}
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def snake_case__ ( self : Dict , lowerCAmelCase__ : Tuple ) -> Optional[Any]:
        """Tokenize text with sentencepiece."""
        return self.sp_model.encode(_a , out_type=_a )
    def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Dict ) -> Tuple:
        """Token -> id via sentencepiece."""
        return self.sp_model.piece_to_id(_a )
    def snake_case__ ( self : int , lowerCAmelCase__ : List[str] ) -> Tuple:
        """Id -> token via sentencepiece."""
        _UpperCamelCase = self.sp_model.IdToPiece(_a )
        return token
    def snake_case__ ( self : List[str] , lowerCAmelCase__ : Any ) -> Tuple:
        """Join tokens into a string, keeping special tokens out of sentencepiece decoding."""
        _UpperCamelCase = []
        _UpperCamelCase = ''''''
        _UpperCamelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_a ) + token
                _UpperCamelCase = True
                _UpperCamelCase = []
            else:
                current_sub_tokens.append(_a )
                _UpperCamelCase = False
        out_string += self.sp_model.decode(_a )
        return out_string.strip()
    def snake_case__ ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] = False , lowerCAmelCase__ : Any = None , lowerCAmelCase__ : Optional[Any] = True , **lowerCAmelCase__ : Tuple , ) -> Any:
        """Decode ids to text, mimicking the Rust tokenizer's spacing around [MASK]/[SEP]."""
        _UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , _a )
        _UpperCamelCase = self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        _UpperCamelCase = []
        _UpperCamelCase = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(_a ) )
                    _UpperCamelCase = []
                sub_texts.append(_a )
            else:
                current_sub_text.append(_a )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(_a ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            _UpperCamelCase = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(_a ) )
        else:
            _UpperCamelCase = ''''''.join(_a )
        _UpperCamelCase = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            _UpperCamelCase = self.clean_up_tokenization(_a )
            return clean_text
        else:
            return text
    def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple = None ) -> str:
        """Copy (or serialize from memory) the sentencepiece model into ``save_directory``."""
        if not os.path.isdir(_a ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        _UpperCamelCase = os.path.join(
            _a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _a )
        elif not os.path.isfile(self.vocab_file ):
            with open(_a , '''wb''' ) as fi:
                _UpperCamelCase = self.sp_model.serialized_model_proto()
                fi.write(_a )
        return (out_vocab_file,)
    def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple = None ) -> Dict:
        """Add BigBird special tokens: [CLS] seq [SEP] ( seq2 [SEP] )?"""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _UpperCamelCase = [self.cls_token_id]
        _UpperCamelCase = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_a + sep
    def snake_case__ ( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : str = None , lowerCAmelCase__ : Optional[Any] = False ) -> Any:
        """Return a mask with 1 at special-token positions ([CLS]/[SEP]) and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        if token_ids_a is None:
            return [1] + ([0] * len(_a )) + [1]
        return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
    def snake_case__ ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] = None ) -> Optional[int]:
        """Token-type ids: 0 for the first sequence span, 1 for the second."""
        _UpperCamelCase = [self.sep_token_id]
        _UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 324 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __magic_name__ ( unittest.TestCase ):
    """Config/inputs builder for the Flax RoFormer model tests.

    NOTE(review): every constructor parameter was renamed to `_a` by an
    automatic renamer (duplicate parameter names — a SyntaxError); each
    default below originally belonged to one named hyperparameter.
    """
    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ):
        """Store the test hyperparameters on the tester instance."""
        lowerCamelCase = parent
        lowerCamelCase = batch_size
        lowerCamelCase = seq_length
        lowerCamelCase = is_training
        lowerCamelCase = use_attention_mask
        lowerCamelCase = use_token_type_ids
        lowerCamelCase = use_labels
        lowerCamelCase = vocab_size
        lowerCamelCase = hidden_size
        lowerCamelCase = num_hidden_layers
        lowerCamelCase = num_attention_heads
        lowerCamelCase = intermediate_size
        lowerCamelCase = hidden_act
        lowerCamelCase = hidden_dropout_prob
        lowerCamelCase = attention_probs_dropout_prob
        lowerCamelCase = max_position_embeddings
        lowerCamelCase = type_vocab_size
        lowerCamelCase = type_sequence_label_size
        lowerCamelCase = initializer_range
        lowerCamelCase = num_choices
    def _lowerCAmelCase ( self ):
        """Build a RoFormerConfig plus random input ids / masks / token-type ids."""
        lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase = None
        if self.use_attention_mask:
            lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase = None
        if self.use_token_type_ids:
            lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def _lowerCAmelCase ( self ):
        """Repackage config and inputs into the dict shape the common test suite expects."""
        lowerCamelCase = self.prepare_config_and_inputs()
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
        lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ):
    """Common-suite harness over every Flax RoFormer model class."""
    __UpperCamelCase = True
    # all Flax RoFormer heads under test (empty when flax is unavailable)
    __UpperCamelCase = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def _lowerCAmelCase ( self ):
        """Set up the model tester.

        NOTE(review): `FlaxRoFormerModelTester` is not defined in this file
        after renaming (the tester class above is named `__magic_name__`) —
        confirm against the upstream source.
        """
        lowerCamelCase = FlaxRoFormerModelTester(self )
    @slow
    def _lowerCAmelCase ( self ):
        """Smoke-test from_pretrained + a forward pass for every model class."""
        for model_class_name in self.all_model_classes:
            lowerCamelCase = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_a )
            lowerCamelCase = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_a )
@require_flax
class __magic_name__ ( unittest.TestCase ):
    """Integration test pinning RoFormer masked-LM logits to reference values."""
    @slow
    def _lowerCAmelCase ( self ):
        """Check output shape and a 3x3 logits slice against hard-coded values (atol=1e-4)."""
        lowerCamelCase = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
        lowerCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase = model(_a )[0]
        lowerCamelCase = 50_000
        lowerCamelCase = (1, 6, vocab_size)
        self.assertEqual(output.shape , _a )
        # reference slice captured from a known-good run of the checkpoint
        lowerCamelCase = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 291 | 0 |
"""simple docstring"""
def A__ ( ):
return 1
def A__ ( UpperCamelCase ):
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def A__ ( UpperCamelCase ):
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase )
def A__ ( UpperCamelCase ):
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase )
def A__ ( UpperCamelCase ):
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase )
def A__ ( UpperCamelCase ):
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase )
def A__ ( UpperCamelCase ):
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase )
def A__ ( UpperCamelCase ):
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase )
def A__ ( UpperCamelCase = 200 ):
return two_pound(UpperCamelCase )
if __name__ == "__main__":
print(solution(int(input().strip())))
| 292 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
    """Fast ONNX Stable Diffusion img2img tests on a tiny test checkpoint,
    one test per scheduler variant; each compares a reference pixel slice
    with a loose tolerance (identifiers are machine-renamed; `A` is a
    renamer artifact standing in for distinct locals)."""
    # tiny CPU-friendly checkpoint used by every test below
    UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ):
        """Deterministic dummy pipeline inputs (seeded 128x128 image + RNG)."""
        A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) )
        A = np.random.RandomState(__UpperCamelCase )
        A = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def lowerCamelCase ( self :Any ):
        """Default scheduler: check output shape and a reference pixel slice."""
        A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = self.get_dummy_inputs()
        A = pipe(**__UpperCamelCase ).images
        A = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_28, 1_28, 3)
        A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def lowerCamelCase ( self :Dict ):
        """PNDM scheduler with skip_prk_steps: shape + reference slice."""
        A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = self.get_dummy_inputs()
        A = pipe(**__UpperCamelCase ).images
        A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def lowerCamelCase ( self :Optional[Any] ):
        """LMS discrete scheduler: warmup pass first, then shape + reference slice."""
        A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        # warmup pass to apply optimizations
        A = pipe(**self.get_dummy_inputs() )
        A = self.get_dummy_inputs()
        A = pipe(**__UpperCamelCase ).images
        A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def lowerCamelCase ( self :Dict ):
        """Euler discrete scheduler: shape + reference slice."""
        A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = self.get_dummy_inputs()
        A = pipe(**__UpperCamelCase ).images
        A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def lowerCamelCase ( self :Optional[Any] ):
        """Euler-ancestral scheduler: shape + reference slice."""
        A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = self.get_dummy_inputs()
        A = pipe(**__UpperCamelCase ).images
        A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def lowerCamelCase ( self :Union[str, Any] ):
        """DPM-Solver multistep scheduler: shape + reference slice."""
        A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = self.get_dummy_inputs()
        A = pipe(**__UpperCamelCase ).images
        A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Nightly GPU integration tests for ONNX Stable Diffusion img2img on
    real checkpoints (identifiers are machine-renamed)."""
    @property
    def lowerCamelCase ( self :Optional[Any] ):
        """CUDA execution provider config with a 15GB arena limit."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def lowerCamelCase ( self :Optional[int] ):
        """ONNX Runtime session options with memory-pattern optimization disabled."""
        A = ort.SessionOptions()
        A = False
        return options
    def lowerCamelCase ( self :Dict ):
        """SD v1.4 + default PNDM scheduler: compare a pixel slice of the output."""
        A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        A = init_image.resize((7_68, 5_12) )
        # using the PNDM scheduler by default
        A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = "A fantasy landscape, trending on artstation"
        A = np.random.RandomState(0 )
        A = pipe(
            prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
        A = output.images
        A = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def lowerCamelCase ( self :Any ):
        """SD v1.5 + LMS discrete scheduler: compare a pixel slice of the output."""
        A = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        A = init_image.resize((7_68, 5_12) )
        A = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
        A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = "A fantasy landscape, trending on artstation"
        A = np.random.RandomState(0 )
        A = pipe(
            prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
        A = output.images
        A = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 292 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
_snake_case : Union[str, Any] = logging.get_logger(__name__)
_snake_case : Dict = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def A__ ( UpperCamelCase , UpperCamelCase ):
    """Convert an official (GluonNLP/MXNet) Bort checkpoint into a HuggingFace
    Transformers ``BertForMaskedLM`` checkpoint and verify the two models agree.

    Parameters: the first argument appears to be the path to the Bort params
    file, the second the output folder for the PyTorch model — TODO confirm.

    NOTE(review): identifiers are machine-obfuscated — both parameters are
    named ``UpperCamelCase`` (a SyntaxError: duplicate argument) and results
    are bound to the throwaway name ``A`` while later lines read the original
    names (``predefined_args``, ``original_bort``, ``hf_bort_model``, ...).
    The original descriptive names must be restored before this can run.
    """
    # Bort hyper-parameters: 4 layers, 8 heads, 768 FFN hidden size, 1024 units.
    A = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1_024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1_024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1E-5,
        "token_type_vocab_size": 2,
    }
    A = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    A = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=UpperCamelCase , output_all_encodings=UpperCamelCase , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , UpperCamelCase ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    A = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    A = os.path.join(get_home_dir() , "models" )
    A = _load_vocab(UpperCamelCase , UpperCamelCase , UpperCamelCase , cls=UpperCamelCase )
    A = nlp.model.BERTModel(
        UpperCamelCase , len(UpperCamelCase ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=UpperCamelCase , use_token_type_embed=UpperCamelCase , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=UpperCamelCase , use_decoder=UpperCamelCase , )
    original_bort.load_parameters(UpperCamelCase , cast_dtype=UpperCamelCase , ignore_extra=UpperCamelCase )
    A = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    A = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(UpperCamelCase ),
    }
    A = BertConfig.from_dict(UpperCamelCase )
    A = BertForMaskedLM(UpperCamelCase )
    hf_bort_model.eval()
    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(UpperCamelCase ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
    # Check param shapes and map new HF param back
    # NOTE(review): obfuscated — both parameters share one name; originally
    # (hf_param, gluon_param).
    def check_and_map_params(UpperCamelCase , UpperCamelCase ):
        A = hf_param.shape
        A = to_torch(params[gluon_param] )
        A = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    # Map the embedding tensors first.
    A = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
    A = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
    A = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
    A = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    A = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    # Copy each encoder layer's attention / intermediate / output tensors.
    for i in range(hf_bort_config.num_hidden_layers ):
        A = hf_bort_model.bert.encoder.layer[i]
        # self attention
        A = layer.attention.self
        A = check_and_map_params(
            self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
        A = check_and_map_params(
            self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
        A = check_and_map_params(
            self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
        A = check_and_map_params(
            self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
        A = check_and_map_params(
            self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
        A = check_and_map_params(
            self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
        # self attention output
        A = layer.attention.output
        A = check_and_map_params(
            self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
        A = check_and_map_params(
            self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
        A = check_and_map_params(
            self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
        A = check_and_map_params(
            self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )
        # intermediate
        A = layer.intermediate
        A = check_and_map_params(
            intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
        A = check_and_map_params(
            intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
        # output
        A = layer.output
        A = check_and_map_params(
            bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
        A = check_and_map_params(
            bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
        A = check_and_map_params(
            bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
        A = check_and_map_params(
            bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
    # Save space and energy 🎄
    hf_bort_model.half()
    # Compare output of both models
    A = RobertaTokenizer.from_pretrained("roberta-base" )
    A = tokenizer.encode_plus(UpperCamelCase )["input_ids"]
    # Get gluon output
    A = mx.nd.array([input_ids] )
    A = original_bort(inputs=UpperCamelCase , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(UpperCamelCase )
    A = BertModel.from_pretrained(UpperCamelCase )
    hf_bort_model.eval()
    A = tokenizer.encode_plus(UpperCamelCase , return_tensors="pt" )
    A = hf_bort_model(**UpperCamelCase )[0]
    A = output_gluon[0].asnumpy()
    A = output_hf[0].detach().numpy()
    # Sanity check: the converted model must reproduce the Gluon outputs.
    A = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    A = np.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 )
    if success:
        print("✔️ Both model do output the same tensors" )
    else:
        print("❌ Both model do **NOT** output the same tensors" )
        print("Absolute difference is:" , UpperCamelCase )
if __name__ == "__main__":
    # CLI: convert --bort_checkpoint_path into --pytorch_dump_folder_path.
    # NOTE(review): obfuscated — results are assigned to `_snake_case` but read
    # back as `parser`/`args`, and `convert_bort_checkpoint_to_pytorch` is the
    # original (un-obfuscated) name of the conversion function defined above.
    _snake_case : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    _snake_case : Union[str, Any] = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 292 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle for the given number of rows.

    Args:
        num_rows: how many rows of the triangle to print.
    """
    # Fix for the obfuscated original: the generated triangle was bound to a
    # throwaway name while the loop below reads `triangle`.
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces so the triangle is centred
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values; last value in a row gets no trailing space
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row.

    Args:
        num_rows: number of rows to generate; must be a non-negative int.
    Returns:
        A list of rows; row i contains i + 1 integers.
    Raises:
        TypeError: if num_rows is not an int.
        ValueError: if num_rows is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    # Fix for the obfuscated original: locals were bound to `A` while the loop
    # body reads `triangle`; this name is also what callers use (see benchmark).
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build row `current_row_idx` of Pascal's triangle from the rows above it.

    Args:
        triangle: rows 0 .. current_row_idx - 1, already generated.
        current_row_idx: index of the row to build.
    Returns:
        The new row (length current_row_idx + 1).
    """
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Set current_row[current_col_idx] to the sum of the two elements above it.

    Fix for the obfuscated original: the sum was assigned to a throwaway name
    instead of being stored back into `current_row`, so rows stayed at -1.
    """
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle, computing only the distinct half of each row
    and mirroring it (rows are symmetric).

    Args:
        num_rows: number of rows to generate; must be a non-negative int.
    Returns:
        A list of rows; row i contains i + 1 integers.
    Raises:
        TypeError: if num_rows is not an int.
        ValueError: if num_rows is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so each element is the sum of the
        # pair above it.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        # Mirror the first half (excluding the middle element for odd lengths).
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Time both triangle generators for row counts 0..14 and print results."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # timeit resolves the call through __main__, so this only works when
        # the module is executed as a script.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    # Script entry point: run any doctests, then the timing comparison of the
    # two triangle generators.
    import doctest
    doctest.testmod()
    benchmark()
| 292 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    """Create an IAM role that SageMaker training jobs can assume, attaching
    the permissions SageMaker needs; reuse the role if it already exists.

    Args:
        role_name: name of the IAM role to create.
    """
    # `botoa` is this file's (obfuscated) boto3 import.
    iam_client = botoa.client("iam")
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    """Return the ARN of an existing IAM role.

    Fix for the obfuscated original: the client was bound to a throwaway name
    while the return line reads `iam_client`.
    """
    # `botoa` is this file's (obfuscated) boto3 import.
    iam_client = botoa.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def A__ ( ):
    """Interactively collect AWS/SageMaker settings and return a SageMakerConfig.

    Walks the user through credentials, IAM role, Docker image, SageMaker
    inputs/metrics files, distributed mode, torch dynamo options, EC2 instance
    type, machine count and mixed precision.

    NOTE(review): identifiers are machine-obfuscated — answers are bound to the
    throwaway name `A` while later lines read the original names
    (`credentials_configuration`, `aws_profile`, `iam_role_name`,
    `distributed_type`, `use_dynamo`, `eca_instance_query`, ...); the original
    names (and the os.environ / dynamo_config assignments they fed) need
    restoring before this can run.
    """
    # --- credentials ---
    A = _ask_options(
        "How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , UpperCamelCase , )
    A = None
    if credentials_configuration == 0:
        A = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
        A = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
        A = _ask_field("AWS Access Key ID: " )
        A = aws_access_key_id
        A = _ask_field("AWS Secret Access Key: " )
        A = aws_secret_access_key
    A = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
    A = aws_region
    # --- IAM role ---
    A = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , UpperCamelCase , )
    if role_management == 0:
        A = _ask_field("Enter your IAM role name: " )
    else:
        A = "accelerate_sagemaker_execution_role"
        print(F"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
        _create_iam_role_for_sagemaker(UpperCamelCase )
    # --- optional custom Docker image ---
    A = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase , error_message="Please enter yes or no." , )
    A = None
    if is_custom_docker_image:
        A = _ask_field("Enter your Docker image: " , lambda UpperCamelCase : str(UpperCamelCase ).lower() )
    # --- optional SageMaker inputs / metrics TSV files ---
    A = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase , error_message="Please enter yes or no." , )
    A = None
    if is_sagemaker_inputs_enabled:
        A = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda UpperCamelCase : str(UpperCamelCase ).lower() , )
    A = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase , error_message="Please enter yes or no." , )
    A = None
    if is_sagemaker_metrics_enabled:
        A = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda UpperCamelCase : str(UpperCamelCase ).lower() , )
    # --- distributed mode and torch dynamo ---
    A = _ask_options(
        "What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
    A = {}
    A = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=UpperCamelCase , error_message="Please enter yes or no." , )
    if use_dynamo:
        A = "dynamo_"
        A = _ask_options(
            "Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
        A = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase , error_message="Please enter yes or no." , )
        if use_custom_options:
            A = _ask_options(
                "Which mode do you want to use?" , UpperCamelCase , lambda UpperCamelCase : TORCH_DYNAMO_MODES[int(UpperCamelCase )] , default="default" , )
            A = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase , error_message="Please enter yes or no." , )
            A = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase , error_message="Please enter yes or no." , )
    # --- EC2 instance type and machine count ---
    A = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        A = _ask_options(
            UpperCamelCase , UpperCamelCase , lambda UpperCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(UpperCamelCase )] )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        A = _ask_field(UpperCamelCase , lambda UpperCamelCase : str(UpperCamelCase ).lower() , default="ml.p3.2xlarge" )
    A = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        A = _ask_field(
            "How many machines do you want use? [1]: " , UpperCamelCase , default=1 , )
    A = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
    return SageMakerConfig(
        image_uri=UpperCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=UpperCamelCase , use_cpu=UpperCamelCase , dynamo_config=UpperCamelCase , eca_instance_type=UpperCamelCase , profile=UpperCamelCase , region=UpperCamelCase , iam_role_name=UpperCamelCase , mixed_precision=UpperCamelCase , num_machines=UpperCamelCase , sagemaker_inputs_file=UpperCamelCase , sagemaker_metrics_file=UpperCamelCase , )
| 292 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read a file as bytes and return its content as a string of '0'/'1' bits.

    Args:
        file_path: path to the file to read.
    Returns:
        The file content with every byte rendered as 8 binary digits.
    Raises:
        SystemExit: if the file cannot be opened (after printing a message).
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            # each byte becomes a zero-padded 8-character bit string
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress a bit string produced by the matching LZW compressor.

    Starts from the seed lexicon {"0": "0", "1": "1"} and rebuilds it
    incrementally while emitting the decoded bit string.

    Fix for the obfuscated original: lexicon updates and the curr_string reset
    were assigned to a throwaway name, so the table was never actually grown.

    Args:
        data_bits: compressed data as a string of '0'/'1' characters.
    Returns:
        The decompressed bit string.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        # When the lexicon size hits a power of two the code width grows by a
        # bit: re-key every existing entry with a leading "0".
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write a bit string to a file as bytes, 8 bits per byte.

    The last chunk is padded with a "1" marker followed by zeros; note that the
    final (padding) chunk itself is intentionally not written ([:-1] below),
    matching the decompressor's output format.

    Args:
        file_path: destination path.
        to_write: string of '0'/'1' characters to serialize.
    Raises:
        SystemExit: if the file cannot be opened (after printing a message).
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix from a compressed bit string.

    Counts the leading zeros, drops them, then drops the same number of bits
    plus one (the terminating "1") from what remains.

    Fix for the obfuscated original: counter and both slices were assigned to
    a throwaway name, so nothing was actually removed.

    Args:
        data_bits: compressed bit string including the prefix.
    Returns:
        The bit string with the prefix removed.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Decompress an LZW-compressed file into a destination file.

    Pipeline: read the source bits, strip the size prefix, LZW-decompress,
    then serialize the result as bytes.

    Args:
        source_path: path of the compressed input file.
        destination_path: path to write the decompressed output to.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
    # CLI: decompress sys.argv[1] (LZW-compressed file) into sys.argv[2].
    compress(sys.argv[1], sys.argv[2])
| 292 | 1 |
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_snake_case : Optional[List[str]] = None
_snake_case : Any = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_snake_case : Dict = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class _UpperCAmelCase :
    """Dataset feature that encodes/decodes images.

    Stored in Arrow as a struct ``{"bytes": binary, "path": string}``; examples
    may be given as a file path, raw bytes, a storage dict, a numpy array or a
    ``PIL.Image.Image`` and are decoded back to PIL images.

    NOTE(review): identifiers are machine-obfuscated — every class attribute is
    named ``UpperCamelCase`` (each shadows the previous) and many locals are
    bound to ``A`` while later lines read the original names. The methods below
    correspond, in order, to encode_example, decode_example, flatten,
    cast_storage and embed_storage of the original Image feature.
    """
    # decode flag (True -> examples are decoded to PIL images)
    UpperCamelCase = True
    UpperCamelCase = None
    # Automatically constructed
    UpperCamelCase = "PIL.Image.Image"
    UpperCamelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    UpperCamelCase = field(default='''Image''' , init=lowercase_ , repr=lowercase_ )
    def __call__( self :Dict ):
        # Arrow storage type of this feature.
        return self.pa_type
    def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
        """Encode one example (path / bytes / dict / ndarray / PIL image) into
        the ``{"bytes", "path"}`` storage dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'." )
        # NOTE(review): obfuscation artifact — isinstance is called with the
        # value as both arguments in the next three checks; the original
        # dispatched on list / str / bytes respectively.
        if isinstance(__UpperCamelCase , __UpperCamelCase ):
            A = np.array(__UpperCamelCase )
        if isinstance(__UpperCamelCase , __UpperCamelCase ):
            return {"path": value, "bytes": None}
        elif isinstance(__UpperCamelCase , __UpperCamelCase ):
            return {"path": None, "bytes": value}
        elif isinstance(__UpperCamelCase , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(__UpperCamelCase )
        elif isinstance(__UpperCamelCase , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(__UpperCamelCase )
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path" )}
        elif value.get("bytes" ) is not None or value.get("path" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes" ), "path": value.get("path" )}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
    def lowerCamelCase ( self :Dict , __UpperCamelCase :dict , __UpperCamelCase :List[str]=None ):
        """Decode a ``{"bytes", "path"}`` storage dict back into a PIL image.

        Local paths are opened directly; remote ("::"-chained) paths go through
        xopen with an optional per-repo auth token.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'." )
        if token_per_repo_id is None:
            A = {}
        A, A = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}." )
            else:
                if is_local_path(__UpperCamelCase ):
                    A = PIL.Image.open(__UpperCamelCase )
                else:
                    A = path.split("::" )[-1]
                    try:
                        # resolve the hub repo id to pick the right auth token
                        A = string_to_dict(__UpperCamelCase , config.HUB_DATASETS_URL )["repo_id"]
                        A = token_per_repo_id.get(__UpperCamelCase )
                    except ValueError:
                        A = None
                    with xopen(__UpperCamelCase , "rb" , use_auth_token=__UpperCamelCase ) as f:
                        A = BytesIO(f.read() )
                    A = PIL.Image.open(bytes_ )
        else:
            A = PIL.Image.open(BytesIO(bytes_ ) )
        image.load() # to avoid "Too many open files" errors
        return image
    def lowerCamelCase ( self :Optional[int] ):
        """Flatten: the feature itself while decodable, otherwise its raw
        bytes/path column types."""
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value("binary" ),
                "path": Value("string" ),
            }
        )
    def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
        """Cast an Arrow array (strings, binaries, structs or nested lists) to
        this feature's ``{"bytes", "path"}`` struct storage."""
        if pa.types.is_string(storage.type ):
            A = pa.array([None] * len(__UpperCamelCase ) , type=pa.binary() )
            A = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            A = pa.array([None] * len(__UpperCamelCase ) , type=pa.string() )
            A = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                A = storage.field("bytes" )
            else:
                A = pa.array([None] * len(__UpperCamelCase ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                A = storage.field("path" )
            else:
                A = pa.array([None] * len(__UpperCamelCase ) , type=pa.string() )
            A = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            # nested lists are treated as raw arrays and encoded to image bytes
            A = pa.array(
                [encode_np_array(np.array(__UpperCamelCase ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            A = pa.array([None] * len(__UpperCamelCase ) , type=pa.string() )
            A = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(__UpperCamelCase , self.pa_type )
    def lowerCamelCase ( self :str , __UpperCamelCase :pa.StructArray ):
        """Embed external image files into the storage: read each path's bytes
        and keep only the file's basename as path."""
        @no_op_if_value_is_null
        def path_to_bytes(__UpperCamelCase :List[str] ):
            with xopen(__UpperCamelCase , "rb" ) as f:
                A = f.read()
            return bytes_
        A = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        A = pa.array(
            [os.path.basename(__UpperCamelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        A = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(__UpperCamelCase , self.pa_type )
def list_image_compression_formats() -> list:
    """Return the image formats PIL can both open and save, caching the result
    in the module-level _IMAGE_COMPRESSION_FORMATS.

    Fix for the obfuscated original: the computed list was assigned to a
    throwaway name, so the global cache was never populated.
    NOTE(review): the module-level initializer for _IMAGE_COMPRESSION_FORMATS
    is itself obfuscated (assigned to `_snake_case` above) and must also be
    restored for the first read here to succeed.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        # usable compression formats must be both readable and writable
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image) -> bytes:
    """Serialize a PIL Image to bytes, preferring its native format.

    Falls back to PNG for the common 8-bit modes and TIFF otherwise.

    Args:
        image: a PIL.Image.Image instance.
    Returns:
        The encoded image bytes.
    """
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        # PNG covers the standard modes; anything exotic is written as TIFF
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image) -> dict:
    """Encode a PIL image as a ``{"path", "bytes"}`` storage dict.

    If the image still knows the file it was loaded from, only the path is
    stored (no data duplication); otherwise the image is serialized to bytes.
    """
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def A__ ( UpperCamelCase ):
    """Encode a numpy array as an image dict ``{"path": None, "bytes": ...}``,
    downcasting its dtype where needed so Pillow can encode it.

    NOTE(review): identifiers are machine-obfuscated — locals are bound to `A`
    while later lines read `dtype`, `dest_dtype`, `dtype_itemsize`, etc.; the
    original names must be restored before this can run.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    A = array.dtype
    # normalize "=" (native) byteorder to an explicit "<" or ">"
    A = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    A = dtype.kind
    A = dtype.itemsize
    A = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        A = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                F"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
        if dtype is not dest_dtype:
            warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        A = dtype
    else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            # try progressively smaller item sizes of the same kind/byteorder
            A = dtype_byteorder + dtype_kind + str(UpperCamelCase )
            A = np.dtype(UpperCamelCase )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            F"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" )
    A = PIL.Image.fromarray(array.astype(UpperCamelCase ) )
    return {"path": None, "bytes": image_to_bytes(UpperCamelCase )}
def A__ ( UpperCamelCase ):
    """Encode a list of image objects (paths, arrays or PIL images) into a list
    of ``{"path", "bytes"}`` dicts, dispatching on the first non-null value.

    NOTE(review): identifiers are machine-obfuscated — the unpacking target and
    isinstance arguments were replaced (`A, A = ...`, `isinstance(x, x)`), and
    later lines read `objs` / `obj_to_image_dict_func`; the original names must
    be restored before this can run.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        A, A = first_non_null_value(UpperCamelCase )
        if isinstance(UpperCamelCase , UpperCamelCase ):
            # string objects are treated as file paths
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(UpperCamelCase , np.ndarray ):
            A = no_op_if_value_is_null(UpperCamelCase )
            return [obj_to_image_dict_func(UpperCamelCase ) for obj in objs]
        elif isinstance(UpperCamelCase , PIL.Image.Image ):
            A = no_op_if_value_is_null(UpperCamelCase )
            return [obj_to_image_dict_func(UpperCamelCase ) for obj in objs]
        else:
            return objs
    else:
        return objs
| 292 |
"""simple docstring"""
class _UpperCAmelCase :
def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ):
A = name
A = val
def __str__( self :str ):
return f"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ):
return self.val < other.val
class _UpperCAmelCase :
def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ):
A = {}
A = {}
A = self.build_heap(__UpperCamelCase )
def __getitem__( self :int , __UpperCamelCase :Optional[int] ):
return self.get_value(__UpperCamelCase )
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ):
return (idx - 1) // 2
def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ):
return idx * 2 + 1
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ):
return idx * 2 + 2
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ):
return self.heap_dict[key]
def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ):
A = len(__UpperCamelCase ) - 1
A = self.get_parent_idx(__UpperCamelCase )
for idx, i in enumerate(__UpperCamelCase ):
A = idx
A = i.val
for i in range(__UpperCamelCase , -1 , -1 ):
self.sift_down(__UpperCamelCase , __UpperCamelCase )
return array
def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ):
while True:
A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741
A = self.get_right_child_idx(__UpperCamelCase )
A = idx
if l < len(__UpperCamelCase ) and array[l] < array[idx]:
A = l
if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
A = r
if smallest != idx:
A, A = array[smallest], array[idx]
(
(
A
), (
A
),
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
A = smallest
else:
break
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ):
A = self.get_parent_idx(__UpperCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
A, A = self.heap[idx], self.heap[p]
A, A = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
A = p
A = self.get_parent_idx(__UpperCamelCase )
def lowerCamelCase ( self :Any ):
return self.heap[0]
def lowerCamelCase ( self :Tuple ):
A, A = self.heap[-1], self.heap[0]
A, A = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
A = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ):
self.heap.append(__UpperCamelCase )
A = len(self.heap ) - 1
A = node.val
self.sift_up(len(self.heap ) - 1 )
def lowerCamelCase ( self :Tuple ):
return len(self.heap ) == 0
def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
A = new_value
A = new_value
self.sift_up(self.idx_of_element[node] )
# --- Demo / driver --------------------------------------------------------
# NOTE(review): this driver is broken by the source obfuscation.  Every
# node below is bound to the same name `_snake_case` (each assignment
# clobbers the previous), and the names `Node`, `MinHeap`, `my_min_heap`,
# `r`, `b`, `a`, `x`, `e` referenced by the calls are never defined in this
# module (the classes above are named `_UpperCAmelCase`).  Restore the
# original bindings before running.
_snake_case : Optional[int] = Node('R', -1)
_snake_case : Tuple = Node('B', 6)
_snake_case : Tuple = Node('A', 3)
_snake_case : Optional[int] = Node('X', 1)
_snake_case : List[Any] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_snake_case : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
    print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 292 | 1 |
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def A__ ( graph , v , visited_forward , visited_backward , cst_fwd , cst_bwd , queue , parent , shortest_distance ):
    """Relax all edges out of ``v`` for one direction of bidirectional Dijkstra.

    Updates ``cst_fwd``/``parent``/``queue`` in place and returns the (possibly
    improved) best known ``shortest_distance`` when the two search frontiers meet.

    NOTE(review): the obfuscated original declared all nine parameters with the
    same name (a SyntaxError); the names below are restored from the body.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the opposite search already settled nxt, a full s->t path exists.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def A__ ( source , destination , graph_forward , graph_backward ):
    """Bidirectional Dijkstra: shortest distance from ``source`` to ``destination``.

    Returns the distance, or -1 when ``destination`` is unreachable.

    NOTE(review): the obfuscated original declared all four parameters with the
    same name (a SyntaxError); it also calls ``pass_and_relaxation``, which the
    obfuscated module defines under a different name (`A__`) — re-point that
    call before running.
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        # Relax edges out of the forward frontier node ...
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        # ... and out of the backward frontier node (fwd/bwd roles swapped).
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        # Frontiers have met: no strictly shorter path can still appear.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Example adjacency lists for the bidirectional Dijkstra demo
# (forward edges, then the same graph with edges reversed).
# NOTE(review): the obfuscation bound BOTH graphs to the same name
# `_snake_case`, so the second assignment overwrites the first; the
# originals were presumably `graph_fwd` / `graph_bwd`.
_snake_case : List[Any] = {
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
_snake_case : Optional[int] = {
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 292 |
"""simple docstring"""
from __future__ import annotations
# NOTE(review): global accumulator for solved boards; the solver below
# appends to it via the (here undefined) name `solution` — the obfuscation
# renamed the binding to `_snake_case`.  Annotation corrected: this is a
# list, not a str.
_snake_case : list = []
def A__ ( board , row , column ):
    """Return True if a queen can be placed at ``board[row][column]``.

    Checks the row, the column, and both upward diagonals (only rows above
    ``row`` can already hold queens, since the solver fills top-down).

    NOTE(review): the obfuscated original declared all three parameters with
    the same name (a SyntaxError); names restored from the body.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def A__ ( board , row ):
    """Backtracking N-queens solver: place queens row by row.

    Appends every completed board to the global ``solution`` list and prints it.

    NOTE(review): the obfuscated original used the same name for both
    parameters (a SyntaxError).  The calls to ``solution`` / ``printboard`` /
    ``is_safe`` / ``solve`` target names that the obfuscated module defines
    differently (everything is `A__`/`_snake_case`) — re-point before running.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1          # tentatively place a queen
            solve(board, row + 1)
            board[row][i] = 0          # backtrack
    return False
def A__ ( UpperCamelCase ):
    """Print a board: 'Q' for a queen (cell == 1), '.' otherwise.

    NOTE(review): the obfuscated body read an undefined name ``board``;
    it is re-bound from the parameter below.
    """
    board = UpperCamelCase
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q" , end=" " )
            else:
                print("." , end=" " )
        print()
# n=int(input("The no. of queens"))
# Driver: build an empty n x n board and enumerate all N-queens solutions.
# NOTE(review): broken by the obfuscation — `n` and `board` are bound to
# `_snake_case` (second assignment clobbers the first), and `solve` /
# `board` / `solution` are undefined under those names, so this raises
# NameError at import time.
_snake_case : List[str] = 8
_snake_case : List[str] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
| 292 | 1 |
"""simple docstring"""
def A__ ( min_val = 10 , max_val = 1_000 , option = True ):
    """Return ``min_val`` when ``option`` is truthy, else ``max_val``.

    Validates the argument types and that ``min_val <= max_val``.

    NOTE(review): the obfuscated original declared all three parameters with
    the same name (a SyntaxError); names restored from the error messages.
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
    return min_val if option else max_val
def A__ ( number_a , number_b ):
    """Return the integer midpoint of two numbers (int() truncates toward zero).

    NOTE(review): the obfuscated original both duplicated the parameter name
    (a SyntaxError) and summed the first argument with itself; restored to
    the average of the two distinct arguments.
    """
    return int((number_a + number_b) / 2 )
def A__ ( lower , higher , to_guess ):
    """Binary-search demo: repeatedly 'guess' the midpoint until it equals
    ``to_guess`` (which must lie strictly inside (lower, higher)), printing
    the sequence of probes.

    Raises ValueError for an inverted range or an out-of-range target.

    NOTE(review): the obfuscated original declared all three parameters with
    the same name (a SyntaxError).  It calls ``get_avg``, which the obfuscated
    module defines under a different name (`A__`) — re-point before running.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)" )
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value" )

    def answer(number) -> str:
        # Compare a probe against the hidden target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started..." )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number       # target is above the probe
        elif answer(number) == "high":
            last_highest = number      # target is below the probe
        else:
            break
    print(F"guess the number : {last_numbers[-1]}" )
    print(F"details : {last_numbers!s}" )
def A__ ( ):
    """Prompt for the range bounds and the target, then run the guessing demo.

    NOTE(review): the obfuscated original forwarded an undefined name
    (`UpperCamelCase`) three times; it now forwards the values actually read.
    ``guess_the_number`` is defined under a different name (`A__`) in the
    obfuscated module — re-point before running.
    """
    lower = int(input("Enter lower value : " ).strip() )
    higher = int(input("Enter high value : " ).strip() )
    guess = int(input("Enter value to guess : " ).strip() )
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this obfuscated module (the
    # entry point above is named `A__`); running this file raises NameError.
    main()
| 292 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ):
pass
def A__ ( UpperCamelCase ):
    """Return the hex MD5 digest of an image's raw bytes (a stable test fingerprint).

    NOTE(review): the obfuscated original called nonexistent ``hashlib.mda``
    (digit-mangled ``md5``) and read undefined names ``image`` / ``m``.
    """
    m = hashlib.md5(UpperCamelCase.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    # Pipeline test suite for depth-estimation models.
    # NOTE(review): the obfuscation broke this class — methods below declare
    # duplicate `__UpperCamelCase` parameters (a SyntaxError), all share the
    # name `lowerCamelCase` (later defs shadow earlier ones), and bodies read
    # names (`depth_estimator`, `dataset`, `outputs`) whose bindings were
    # renamed to `A`.  Comments mark the apparent intent; restore names
    # before running.
    UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    # Presumably `get_test_pipeline`: builds a pipeline plus sample inputs.
    def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ):
        A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    # Presumably `run_pipeline_test`: checks output schema for single and
    # batched inputs of several image sources/modes.
    def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ):
        A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase )
        import datasets
        A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        A = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ] )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
            ] , __UpperCamelCase , )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF" )
    def lowerCamelCase ( self :Optional[Any] ):
        pass
    # Slow integration test against Intel/dpt-large with pinned depth stats.
    @slow
    @require_torch
    def lowerCamelCase ( self :Optional[Any] ):
        A = "Intel/dpt-large"
        A = pipeline("depth-estimation" , model=__UpperCamelCase )
        A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
        A = hashimage(outputs["depth"] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
    @require_torch
    def lowerCamelCase ( self :Optional[Any] ):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 292 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : int = logging.get_logger(__name__)
_snake_case : List[Any] = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class _UpperCAmelCase ( lowercase_ ):
    """Configuration class for the CTRL model (mirrors ``transformers.CTRLConfig``).

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    with the same name (a SyntaxError) and bound all class attributes to one
    name; parameter and attribute names below are restored from the attributes
    the body assigns and from the ``PretrainedConfig`` contract — confirm
    against upstream ``CTRLConfig``.
    """

    model_type = '''ctrl'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=24_65_34,
        n_positions=2_56,
        n_embd=12_80,
        dff=81_92,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        # Forward remaining kwargs (bos/eos ids, etc.) to PretrainedConfig.
        super().__init__(**kwargs)
| 292 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _UpperCAmelCase :
    # Model tester harness for TFPegasus (builds tiny configs/inputs).
    # NOTE(review): obfuscation damage throughout — the three class
    # attributes all share one name (only the last survives; upstream these
    # are config_cls / config_updates / hidden_act), `__init__` declares
    # many duplicate `__UpperCamelCase` parameters (a SyntaxError), and
    # local bindings were renamed to `A`.  `tf.inta` looks digit-mangled
    # from `tf.int8` — confirm against the upstream Pegasus TF test.
    UpperCamelCase = PegasusConfig
    UpperCamelCase = {}
    UpperCamelCase = '''gelu'''
    def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ):
        A = parent
        A = batch_size
        A = seq_length
        A = is_training
        A = use_labels
        A = vocab_size
        A = hidden_size
        A = num_hidden_layers
        A = num_attention_heads
        A = intermediate_size
        A = hidden_dropout_prob
        A = attention_probs_dropout_prob
        A = max_position_embeddings
        A = eos_token_id
        A = pad_token_id
        A = bos_token_id
    # Presumably `prepare_config_and_inputs_for_common`.
    def lowerCamelCase ( self :Tuple ):
        A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        A = tf.concat([input_ids, eos_tensor] , axis=1 )
        A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        return config, inputs_dict
    # Presumably `check_decoder_model_past_large_inputs`: verifies cached
    # (past_key_values) decoding matches uncached decoding on a slice.
    def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ):
        A = TFPegasusModel(config=__UpperCamelCase ).get_decoder()
        A = inputs_dict["input_ids"]
        A = input_ids[:1, :]
        A = inputs_dict["attention_mask"][:1, :]
        A = inputs_dict["head_mask"]
        A = 1
        # first forward pass
        A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
        A, A = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        A = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        A = tf.concat([input_ids, next_tokens] , axis=-1 )
        A = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
        A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        A = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        A = output_from_no_past[:, -3:, random_slice_idx]
        A = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 )
def A__ ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Build the kwargs dict for a TFPegasus forward pass, synthesizing any
    masks not supplied (attention masks from pad-token positions, head masks
    as all-ones).

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and used the nonexistent dtype ``tf.inta`` — restored to
    ``tf.int8`` per the upstream Pegasus TF test; confirm.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # First decoder position is always attendable; the rest mask out pads.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    # Common-API test suite for TFPegasus.
    # NOTE(review): every class attribute below is bound to the same
    # obfuscated name `UpperCamelCase`, so only the last assignment survives
    # at runtime; upstream these are all_model_classes,
    # all_generative_model_classes, pipeline_model_mapping,
    # is_encoder_decoder, test_pruning, test_onnx.  `__UpperCamelCase` in
    # setUp is undefined here (upstream: PegasusConfig).
    UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    UpperCamelCase = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase = True
    UpperCamelCase = False
    UpperCamelCase = False
    # setUp: build the model tester and the config tester.
    def lowerCamelCase ( self :int ):
        A = TFPegasusModelTester(self )
        A = ConfigTester(self , config_class=__UpperCamelCase )
    def lowerCamelCase ( self :Dict ):
        self.config_tester.run_common_tests()
    # Exercises cached decoding (past key values) via the tester.
    def lowerCamelCase ( self :Any ):
        A = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCamelCase = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCamelCase = '''google/pegasus-xsum'''
@cached_property
def lowerCamelCase ( self :Any ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase ( self :Dict ):
A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase ( self :str , **__UpperCamelCase :str ):
A = self.translate_src_text(**__UpperCamelCase )
assert self.expected_text == generated_words
def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ):
A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" )
A = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , )
A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase )
return generated_words
@slow
def lowerCamelCase ( self :Union[str, Any] ):
self._assert_generated_batch_equal_expected()
| 292 | 1 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_snake_case : Union[str, Any] = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'
_snake_case : Dict = '\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results[\'matthews_correlation\'], 2))\n -0.25\n'
_snake_case : List[str] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """Matthews-correlation metric: thin wrapper over sklearn's matthews_corrcoef.

    NOTE(review): the obfuscated original named both methods ``lowerCamelCase``
    and declared duplicate parameters (a SyntaxError).  ``datasets.Metric``
    dispatches to ``_info`` / ``_compute``, so those names are restored.
    """

    def _info(self):
        # Declare the metric's signature and documentation for `datasets`.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32" ),
                    "references": datasets.Value("int32" ),
                } ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        # sklearn's argument order is (y_true, y_pred).
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight) ),
        }
| 292 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A__ ( UpperCamelCase = "laptop" ):
    # Scrape Amazon.in search results for `product` into a pandas DataFrame.
    # NOTE(review): broken by the obfuscation — the body reads `product`
    # but the parameter was renamed to `UpperCamelCase` (NameError at call
    # time), locals were renamed to `A`, `item.ha` looks digit-mangled
    # from `item.h2`, and the module imports BeautifulSoup from `bsa`
    # (presumably `bs4`).  Confirm against the upstream scraper before use.
    A = F"https://www.amazon.in/laptop/s?k={product}"
    A = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text )
    # Initialize a Pandas dataframe with the column titles
    A = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            # Title, link and current price from the result card.
            A = item.ha.text
            A = "https://www.amazon.in/" + item.ha.a["href"]
            A = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                A = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                A = "Not available"
            try:
                # MRP is the struck-through list price, shown after a rupee sign.
                A = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                A = ""
            try:
                # Discount percentage computed from MRP and current price.
                A = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                A = float("nan" )
        except AttributeError:
            pass
        # NOTE(review): the row-append targets were destroyed by the
        # obfuscation (three bare `A = ...` assignments below); upstream
        # presumably wrote the row into `data_frame.loc[...]`.
        A = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        A = " "
        A = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # NOTE(review): the scraper above is defined as `A__` and this binding
    # was renamed to `_snake_case`, so both `get_amazon_product_data` and
    # `product` are undefined names here — restore before running.
    _snake_case : Optional[int] = 'headphones'
    get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 1 |
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def A__ ( UpperCamelCase ):
    """Return the first MLP linear layer of a causal-LM, per architecture.

    gpt2 exposes it as ``c_fc``; BLOOM-style models as ``dense_4h_to_h``.

    NOTE(review): the obfuscated original read an undefined name ``model`` and
    the attribute ``dense_ah_to_h`` (digit-mangled ``dense_4h_to_h``) — both
    restored; confirm the attribute against the target model class.
    """
    model = UpperCamelCase
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _UpperCAmelCase ( nn.Module ):
def __init__( self :List[Any] , __UpperCamelCase :nn.Module , __UpperCamelCase :int ):
super().__init__()
A = module
A = nn.Sequential(
nn.Linear(module.in_features , __UpperCamelCase , bias=__UpperCamelCase ) , nn.Linear(__UpperCamelCase , module.out_features , bias=__UpperCamelCase ) , )
A = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__UpperCamelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[Any] , *__UpperCamelCase :int , **__UpperCamelCase :List[Any] ):
return self.module(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) + self.adapter(__UpperCamelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
    # Base fixture for the bitsandbytes quantization tests.
    # NOTE(review): the class attributes below are all bound to the same
    # obfuscated name `UpperCamelCase`, so only the last survives; upstream
    # these are model_name, EXPECTED_RELATIVE_DIFFERENCE, input_text,
    # EXPECTED_OUTPUTS and MAX_NEW_TOKENS, and the `EXPECTED_OUTPUTS.add`
    # calls reference a name that no longer exists here.
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    UpperCamelCase = '''bigscience/bloom-1b7'''
    # Constant values
    UpperCamelCase = 2.109_6595_5269_2574
    UpperCamelCase = '''Hello my name is'''
    UpperCamelCase = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    UpperCamelCase = 1_0
    # Presumably setUp: load the shared tokenizer.
    def lowerCamelCase ( self :Any ):
        # Models and tokenizer
        A = AutoTokenizer.from_pretrained(self.model_name )
class _UpperCAmelCase ( lowercase_ ):
def lowerCamelCase ( self :Optional[int] ):
super().setUp()
# Models and tokenizer
A = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
A = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map="auto" )
def lowerCamelCase ( self :Tuple ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self :List[Any] ):
A = self.model_abit.config
self.assertTrue(hasattr(__UpperCamelCase , "quantization_config" ) )
A = config.to_dict()
A = config.to_diff_dict()
A = config.to_json_string()
def lowerCamelCase ( self :int ):
from bitsandbytes.nn import Paramsabit
A = self.model_fpaa.get_memory_footprint()
A = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowerCamelCase ( self :int ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__UpperCamelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowerCamelCase ( self :Dict ):
A = self.tokenizer(self.input_text , return_tensors="pt" )
A = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCamelCase ) , self.EXPECTED_OUTPUTS )
def lowerCamelCase ( self :List[Any] ):
A = BitsAndBytesConfig()
A = True
A = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCamelCase , device_map="auto" )
A = self.tokenizer(self.input_text , return_tensors="pt" )
A = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCamelCase ) , self.EXPECTED_OUTPUTS )
    def lowerCamelCase ( self :Any ):
        """Serializing a 4-bit model is unsupported and must raise."""
        with self.assertRaises(__UpperCamelCase ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(__UpperCamelCase )
    def lowerCamelCase ( self :Optional[Any] ):
        """Passing both a quantization_config and ad-hoc bnb kwargs must raise."""
        A = BitsAndBytesConfig()
        with self.assertRaises(__UpperCamelCase ):
            A = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=__UpperCamelCase , load_in_abit=__UpperCamelCase , device_map="auto" , bnb_abit_quant_type="nf4" , )
    def lowerCamelCase ( self :Tuple ):
        """A 4-bit model must refuse every cast/move (`to`, `float`, `half`),
        while the fp16 reference model still supports all of them."""
        with self.assertRaises(__UpperCamelCase ):
            # Tries with `str`
            self.model_abit.to("cpu" )
        with self.assertRaises(__UpperCamelCase ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )
        with self.assertRaises(__UpperCamelCase ):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0" ) )
        with self.assertRaises(__UpperCamelCase ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(__UpperCamelCase ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        A = self.tokenizer(self.input_text , return_tensors="pt" )
        A = self.model_fpaa.to(torch.floataa )
        A = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        A = self.model_fpaa.to("cpu" )
        # Check this does not throw an error
        A = self.model_fpaa.half()
        # Check this does not throw an error
        A = self.model_fpaa.float()
    def lowerCamelCase ( self :Optional[int] ):
        """Modules listed in T5's keep-in-fp32 set must stay float32 after 4-bit load."""
        A = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=__UpperCamelCase , device_map="auto" )
        # NOTE(review): `model` presumably refers to the obfuscated binding above — confirm.
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow GPU tests: T5 models must load and generate under 4-bit
    quantization, both with and without the `_keep_in_fp32_modules` escape
    hatch, and the decoder's projections must become bnb Linear4bit layers.

    NOTE(review): the repeated `A = ...` / `cls.model_name` style bindings are
    obfuscated locals/attributes — names used later (`modules`, `model`)
    presumably refer to them; confirm against the original source.
    """

    @classmethod
    def lowerCamelCase ( cls :Optional[Any] ):
        # Shared fixtures for every test in the class.
        A = "t5-small"
        A = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
        A = AutoTokenizer.from_pretrained(cls.model_name )
        A = "Translate in German: Hello, my dog is cute"
    def lowerCamelCase ( self :Dict ):
        # Reclaim GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCamelCase ( self :str ):
        """Loading must work even with `_keep_in_fp32_modules` disabled."""
        from transformers import TaForConditionalGeneration
        A = TaForConditionalGeneration._keep_in_fpaa_modules
        A = None
        # test with `t5-small`
        A = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map="auto" )
        A = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        A = model.generate(**__UpperCamelCase )
        # test with `flan-t5-small`
        A = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=__UpperCamelCase , device_map="auto" )
        A = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        A = model.generate(**__UpperCamelCase )
        # Restore the class attribute that was patched out above.
        A = modules
    def lowerCamelCase ( self :Union[str, Any] ):
        """Default loading path: decoder projections become bnb Linear4bit."""
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        A = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map="auto" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        A = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        A = model.generate(**__UpperCamelCase )
        # test with `flan-t5-small`
        A = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=__UpperCamelCase , device_map="auto" )
        A = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        A = model.generate(**__UpperCamelCase )
class _UpperCAmelCase ( lowercase_ ):
    """4-bit loading across the Auto* heads: only the quantizable linears are
    converted; classification/LM heads must remain plain nn.Parameter."""

    def lowerCamelCase ( self :List[Any] ):
        super().setUp()
        # model_name
        A = "bigscience/bloom-560m"
        A = "t5-small"
        # Different types of model
        A = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map="auto" )
        # Sequence classification model
        A = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=__UpperCamelCase , device_map="auto" )
        # CausalLM model
        A = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map="auto" )
        # Seq2seq model
        A = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=__UpperCamelCase , device_map="auto" )
    def lowerCamelCase ( self :Any ):
        # Drop every model fixture and reclaim GPU memory.
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCamelCase ( self :Optional[Any] ):
        """MLP weights are quantized; model heads stay full-precision parameters."""
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _UpperCAmelCase ( lowercase_ ):
    """The `pipeline` API must accept 4-bit model kwargs and still generate
    one of the expected outputs."""

    def lowerCamelCase ( self :str ):
        super().setUp()
    def lowerCamelCase ( self :Tuple ):
        # Drop the pipeline fixture and reclaim GPU memory.
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCamelCase ( self :List[str] ):
        # NOTE(review): the pipeline is bound to an obfuscated `A`; `self.pipe`
        # and `pipeline_output` presumably refer to these bindings — confirm.
        A = pipeline(
            "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        A = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _UpperCAmelCase ( lowercase_ ):
    """4-bit loading with `device_map='balanced'` must shard across both GPUs
    and still run inference."""

    def lowerCamelCase ( self :Optional[int] ):
        super().setUp()
    def lowerCamelCase ( self :Optional[int] ):
        A = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=__UpperCamelCase , device_map="balanced" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        A = self.tokenizer(self.input_text , return_tensors="pt" )
        # Second real batch
        A = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCamelCase ) , self.EXPECTED_OUTPUTS )
class _UpperCAmelCase ( lowercase_ ):
    """QLoRA-style training smoke test: with the base model frozen and LoRA
    adapters attached to OPT attention projections, gradients must flow into
    the adapters (and only the adapters)."""

    def lowerCamelCase ( self :Optional[Any] ):
        A = "facebook/opt-350m"
        super().setUp()
    def lowerCamelCase ( self :Optional[int] ):
        # Training on 4-bit weights requires bitsandbytes >= 0.37.0; skip otherwise.
        if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
            return
        # Step 1: freeze all parameters
        A = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            A = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                A = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(__UpperCamelCase ) ):
                A = LoRALayer(module.q_proj , rank=16 )
                A = LoRALayer(module.k_proj , rank=16 )
                A = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        A = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            A = model.forward(**__UpperCamelCase )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(__UpperCamelCase , __UpperCamelCase ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(__UpperCamelCase , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class _UpperCAmelCase ( lowercase_ ):
    """GPT-2 XL variant of the quantization test suite; only the checkpoint
    name and the expected fp16/4-bit memory ratio differ.

    NOTE(review): both class attributes share the obfuscated name
    `UpperCamelCase`; presumably `model_name` and
    `EXPECTED_RELATIVE_DIFFERENCE` — confirm.
    """

    UpperCamelCase = '''gpt2-xl'''
    UpperCamelCase = 3.3191_8548_5415_2187
| 292 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
# Module logger. NOTE(review): the pipeline class below calls `logger.warning`,
# but this binding is named `_snake_case` — confirm the intended name is `logger`.
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( lowercase_ ):
    """Speech-to-image DiffusionPipeline: transcribes audio with Whisper, then
    runs a Stable-Diffusion text-to-image generation on the transcription.

    NOTE(review): locals are obfuscated to `A` and arguments to
    `__UpperCamelCase`; names referenced later (`safety_checker`, `prompt`,
    `latents`, ...) presumably map to the positional parameters/bindings —
    confirm against the un-obfuscated source.
    """

    def __init__( self :Dict , __UpperCamelCase :WhisperForConditionalGeneration , __UpperCamelCase :WhisperProcessor , __UpperCamelCase :AutoencoderKL , __UpperCamelCase :CLIPTextModel , __UpperCamelCase :CLIPTokenizer , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase :StableDiffusionSafetyChecker , __UpperCamelCase :CLIPImageProcessor , ):
        super().__init__()
        if safety_checker is None:
            # Warn loudly when the NSFW filter is disabled, per diffusers policy.
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
    def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ):
        # "auto" halves the attention head dim to trade speed for memory.
        if slice_size == "auto":
            A = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__UpperCamelCase )
    def lowerCamelCase ( self :Tuple ):
        # Disable slicing by forwarding a null slice size.
        self.enable_attention_slicing(__UpperCamelCase )
    @torch.no_grad()
    def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ):
        # --- 1. Transcribe the audio input with Whisper; the transcription is the prompt.
        A = self.speech_processor.feature_extractor(
            __UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
        A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 )
        A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
            0
        ]
        # --- 2. Validate prompt / size / callback arguments.
        if isinstance(__UpperCamelCase , __UpperCamelCase ):
            A = 1
        elif isinstance(__UpperCamelCase , __UpperCamelCase ):
            A = len(__UpperCamelCase )
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(__UpperCamelCase )}." )
        # get prompt text embeddings
        A = self.tokenizer(
            __UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        A = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            A = text_input_ids[:, : self.tokenizer.model_max_length]
        A = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        A, A, A = text_embeddings.shape
        A = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
        A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        A = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            A = 42
            if negative_prompt is None:
                A = [""] * batch_size
            elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
                    f" {type(__UpperCamelCase )}." )
            elif isinstance(__UpperCamelCase , __UpperCamelCase ):
                A = [negative_prompt]
            elif batch_size != len(__UpperCamelCase ):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`." )
            else:
                A = negative_prompt
            A = text_input_ids.shape[-1]
            A = self.tokenizer(
                __UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , )
            A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            A = uncond_embeddings.shape[1]
            A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
            A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            A = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        A = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
                    self.device )
            else:
                A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            A = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(__UpperCamelCase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        A = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        A = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        A = {}
        if accepts_eta:
            A = eta
        # --- 3. Denoising loop.
        for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
            # expand the latents if we are doing classifier free guidance
            A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
            # predict the noise residual
            A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
            # perform guidance
            if do_classifier_free_guidance:
                A, A = noise_pred.chunk(2 )
                A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        # --- 4. Decode latents with the VAE and post-process to images.
        A = 1 / 0.18_215 * latents
        A = self.vae.decode(__UpperCamelCase ).sample
        A = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            A = self.numpy_to_pil(__UpperCamelCase )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
| 292 | 1 |
"""simple docstring"""
# Sample directed graphs: the first contains one cycle (0 -> 2 -> 1 -> 0), the
# second contains two cycles (0 -> 1 -> 2 -> 0 and 3 -> 4 -> 5 -> 3).
# NOTE(review): both assignments bind the same name `_snake_case`, so the
# second overwrites the first — presumably they were distinct test graphs.
_snake_case : Tuple = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_snake_case : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def A__ ( vert , visited , graph ):
    """Depth-first post-order traversal (first pass of Kosaraju's algorithm).

    Args:
        vert: start vertex.
        visited: list of booleans indexed by vertex, mutated in place.
        graph: adjacency mapping ``vertex -> list of successors``.

    Returns:
        list: vertices reachable from ``vert`` in DFS post-order.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            # Recurse via the function's own name; the original referenced an
            # undefined `topology_sort`, raising NameError at runtime.
            order += A__(neighbour , visited , graph)
    order.append(vert)
    return order
def A__ ( vert , visited , reversed_graph ):
    """Collect the vertices reachable from ``vert`` in the reversed graph
    (second pass of Kosaraju's algorithm — one strongly connected component).

    Args:
        vert: start vertex.
        visited: list of booleans indexed by vertex, mutated in place.
        reversed_graph: adjacency mapping of the transposed graph.

    Returns:
        list: the component containing ``vert``, in DFS pre-order.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            # Recurse via the function's own name; the original referenced an
            # undefined `find_components`, raising NameError at runtime.
            component += A__(neighbour , visited , reversed_graph)
    return component
def A__ ( graph ):
    """Return the strongly connected components of ``graph`` using Kosaraju's
    two-pass algorithm.

    The original body called `topology_sort` / `find_components`, names that do
    not exist in this module (every helper is bound to ``A__``); the helpers
    are therefore inlined here so the function is self-contained.

    Args:
        graph: adjacency mapping ``vertex -> list of successors`` with vertices
            numbered ``0 .. len(graph) - 1``.

    Returns:
        list[list[int]]: one list of vertices per strongly connected component.
    """

    def _topology_sort(vert, visited):
        # DFS post-order on the forward graph.
        visited[vert] = True
        order = []
        for neighbour in graph[vert]:
            if not visited[neighbour]:
                order += _topology_sort(neighbour, visited)
        order.append(vert)
        return order

    def _find_component(vert, visited):
        # DFS on the transposed graph collects one component.
        visited[vert] = True
        component = [vert]
        for neighbour in reversed_graph[vert]:
            if not visited[neighbour]:
                component += _find_component(neighbour, visited)
        return component

    n = len(graph)
    reversed_graph = {vert: [] for vert in range(n)}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    visited = [False] * n
    order = []
    for vert in range(n):
        if not visited[vert]:
            order += _topology_sort(vert, visited)

    components_list = []
    visited = [False] * n
    for i in range(n):
        # Process vertices in reverse post-order.
        vert = order[n - i - 1]
        if not visited[vert]:
            components_list.append(_find_component(vert, visited))
    return components_list
| 292 |
"""simple docstring"""
# Public names re-exported by this package's download utilities.
# NOTE(review): presumably this was `__all__`; here it is bound to a throwaway
# name, so it no longer controls `from ... import *` — confirm.
_snake_case : Optional[int] = [
    'DownloadConfig',
    'DownloadManager',
    'DownloadMode',
    'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 292 | 1 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def A__ ( token , num_runs=7 ):
    """Return the ``num_runs`` most recent scheduled daily-CI workflow runs on
    ``main`` from the transformers GitHub Actions API."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


# Stable alias: the helpers below call each other by name, but the original
# file rebound every function to `A__`, making those calls NameErrors.
get_daily_ci_runs = A__


def A__ ( token ):
    """Return the id of the most recent *completed* daily-CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


get_last_daily_ci_runs = A__


def A__ ( artifact_names , output_dir , token ):
    """Download the named artifacts of the latest completed daily-CI run into
    ``output_dir`` (artifacts not present in the run are skipped)."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) is the keyword expected by the imported helper.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)


get_last_daily_ci_artifacts = A__


def A__ ( artifact_names , output_dir , token ):
    """Download the latest daily-CI artifacts and return their text contents.

    Returns:
        dict: ``{artifact_name: {member_filename: decoded_text}}`` for every
        artifact zip found in ``output_dir``.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 292 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def A__ ( state_dict ):
    """Remove fairseq bookkeeping keys that have no Speech2Text counterpart.

    Mutates ``state_dict`` in place; keys that are absent are ignored.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # Pop with a None default so missing keys don't raise KeyError.
        state_dict.pop(k, None)
def A__ ( s_dict ):
    """Rename fairseq parameter keys to their Transformers equivalents
    (``transformer_layers`` -> ``layers``, ``subsample`` -> ``conv``).

    The original body popped the values into a discarded local, which silently
    *deleted* the parameters instead of renaming them; the renamed entries are
    now written back, mutating ``s_dict`` in place.
    """
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def A__ ( emb ):
    """Build a bias-free ``nn.Linear`` that shares an embedding's weight data.

    Args:
        emb: an ``nn.Embedding`` of shape (vocab_size, emb_size).

    Returns:
        nn.Linear: layer with ``weight.data`` aliasing ``emb.weight.data`` and
        no bias (the obfuscated original passed a non-boolean as ``bias``).
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def A__ ( UpperCamelCase , UpperCamelCase ):
    """Convert a fairseq Speech2Text checkpoint (.pt) to a Transformers
    Speech2TextForConditionalGeneration model and save it.

    NOTE(review): `remove_ignore_keys_`, `rename_keys` and
    `make_linear_from_emb` are called below, but every helper above is bound
    to `A__` — these calls raise NameError as written; confirm the intended
    helper names. The obfuscated `A = ...` locals likewise shadow the names
    (`mam_aaa`, `args`, `state_dict`, ...) used later.
    """
    A = torch.load(UpperCamelCase , map_location="cpu" )
    A = mam_aaa["args"]
    A = mam_aaa["model"]
    A = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(UpperCamelCase )
    rename_keys(UpperCamelCase )
    A = state_dict["decoder.embed_tokens.weight"].shape[0]
    A = args.share_decoder_input_output_embed
    A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )]
    A = SpeechaTextConfig(
        vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , )
    A = SpeechaTextForConditionalGeneration(UpperCamelCase )
    A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
    # Only positional-embedding weights may legitimately be missing.
    if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F" but all the following weights are missing {missing}" )
    # Tie or copy the LM head depending on the fairseq sharing flag.
    if tie_embeds:
        A = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        A = lm_head_weights
    model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point: --fairseq_path in, --pytorch_dump_folder_path out.
    # NOTE(review): `parser` / `args` refer to the `_snake_case` bindings and
    # `convert_fairseq_sat_checkpoint_to_tfms` is not defined in this file
    # (the converter above is bound to `A__`) — as written this raises
    # NameError; confirm the intended names.
    _snake_case : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    _snake_case : str = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 292 | 1 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
# Result record for the scraped worldometers totals.
_snake_case : Dict = namedtuple('covid_data', 'cases deaths recovered')
def A__ ( UpperCamelCase = "https://www.worldometers.info/coronavirus/" ):
    """Fetch the page and return (cases, deaths, recovered) scraped via XPath.

    Performs a network request on every call; no caching or error handling.
    """
    A = "//div[@class = \"maincounter-number\"]/span/text()"
    return covid_data(*html.fromstring(requests.get(UpperCamelCase ).content ).xpath(UpperCamelCase ) )
# Report template with one placeholder per scraped counter.
_snake_case : List[Any] = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
# NOTE(review): `fmt` and `covid_stats` are not defined in this file (both are
# bound to `_snake_case`/`A__` above) — as written this line raises NameError
# at import time; confirm the intended names.
print(fmt.format(*covid_stats()))
| 292 |
"""simple docstring"""
from math import isqrt, loga
def A__ ( max_number ):
    """Return every prime strictly below ``max_number`` (sieve of Eratosthenes).

    Args:
        max_number: exclusive upper bound, expected >= 2.

    Returns:
        list[int]: primes in ascending order.
    """
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Mark every multiple of i starting from i*i. The original stepped
            # by `max_number` instead of `i`, so composites beyond i*i were
            # never marked and the sieve returned non-primes.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def A__ ( base = 800_800 , degree = 800_800 ):
    """Project Euler 800: count hybrid integers p^q * q^p <= base^degree for
    distinct primes p < q, by comparing logarithms (base 2).

    The original body called `calculate_prime_numbers`, a name that does not
    exist in this module (the sieve above is bound to ``A__`` and is later
    rebound by this very definition), so the sieve is inlined here.

    Args:
        base: base of the bound ``base ** degree``.
        degree: exponent of the bound.

    Returns:
        int: the number of qualifying (p, q) prime pairs.
    """

    def _primes_below(limit):
        # Standard sieve of Eratosthenes; primes strictly below `limit`.
        is_prime = [True] * limit
        for i in range(2, isqrt(limit - 1) + 1):
            if is_prime[i]:
                for j in range(i * i, limit, i):
                    is_prime[j] = False
        return [i for i in range(2, limit) if is_prime[i]]

    # p^q * q^p <= base^degree  <=>  q*log2(p) + p*log2(q) <= degree*log2(base)
    upper_bound = degree * loga(base)
    max_prime = int(upper_bound)
    prime_numbers = _primes_below(max_prime)

    # Two-pointer scan over the sorted primes: for each left prime, shrink
    # `right` until the pair satisfies the bound; every prime in between pairs.
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * loga(prime_numbers[left])
            + prime_numbers[left] * loga(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
    # Prints the answer for the default 800800^800800 bound.
    # NOTE(review): `solution` is not defined in this file (the function above
    # is bound to `A__`) — as written this raises NameError; confirm the name.
    print(F"""{solution() = }""")
| 292 | 1 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCAmelCase ( unittest.TestCase ):
    """Tests for the transformers logging utilities: verbosity setters, the
    TRANSFORMERS_VERBOSITY / TRANSFORMERS_NO_ADVISORY_WARNINGS environment
    variables, and `warning_advice`.

    NOTE(review): locals are obfuscated to `A`; names referenced later
    (`level_origin`, `logger`, `msg`, `env_level_str`, ...) presumably map to
    those bindings — confirm against the un-obfuscated source.
    """

    def lowerCamelCase ( self :List[str] ):
        A = logging.get_logger()
        # the current default level is logging.WARNING
        A = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(__UpperCamelCase )
    def lowerCamelCase ( self :int ):
        A = logging.get_verbosity()
        A = logging.get_logger("transformers.models.bart.tokenization_bart" )
        A = "Testing 1, 2, 3"
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(__UpperCamelCase ) as cl:
                logger.warning(__UpperCamelCase )
            self.assertEqual(cl.out , msg + "\n" )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(__UpperCamelCase ) as cl:
            logger.warning(__UpperCamelCase )
        self.assertEqual(cl.out , "" )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(__UpperCamelCase ) as cl:
            logger.warning(__UpperCamelCase )
        self.assertEqual(cl.out , msg + "\n" )
        # restore to the original level
        logging.set_verbosity(__UpperCamelCase )
    @mockenv(TRANSFORMERS_VERBOSITY="error" )
    def lowerCamelCase ( self :Tuple ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        A = logging.get_logger("transformers.models.bart.tokenization_bart" )
        A = os.getenv("TRANSFORMERS_VERBOSITY" , __UpperCamelCase )
        A = logging.log_levels[env_level_str]
        A = logging.get_verbosity()
        self.assertEqual(
            __UpperCamelCase , __UpperCamelCase , f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
        # restore to the original level
        A = ""
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def lowerCamelCase ( self :Union[str, Any] ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        A = logging.logging.getLogger()
        with CaptureLogger(__UpperCamelCase ) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart" )
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
        # no need to restore as nothing was changed
    def lowerCamelCase ( self :Dict ):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        A = logging.get_logger("transformers.models.bart.tokenization_bart" )
        A = "Testing 1, 2, 3"
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(__UpperCamelCase ) as cl:
                logger.warning_advice(__UpperCamelCase )
            self.assertEqual(cl.out , "" )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(__UpperCamelCase ) as cl:
                logger.warning_advice(__UpperCamelCase )
            self.assertEqual(cl.out , msg + "\n" )
def A__ ( ):
    """Progress bars must be globally toggleable via the huggingface_hub utils."""
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 292 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure for the EnCodec model, following the standard
# transformers sub-package pattern: a name -> symbols mapping consumed by
# _LazyModule, with torch-only entries added conditionally.
#
# Fixes over the previous revision: the torch branch *rebound* the mapping
# (dropping the config/feature-extractor entries), the final call referenced
# an undefined `_import_structure`, and the _LazyModule instance was assigned
# to a throwaway name instead of being installed in `sys.modules`.
_snake_case = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch-only symbols without discarding the entries above.
    _snake_case['modeling_encodec'] = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy loader so heavy imports happen on first
    # attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _snake_case, module_spec=__spec__)
| 292 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A__ ( UpperCamelCase ):
A, A = analyze_text(UpperCamelCase )
A = list(" " + ascii_lowercase )
# what is our total sum of probabilities.
A = sum(single_char_strings.values() )
# one length string
A = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
A = single_char_strings[ch]
A = my_str / all_sum
my_fir_sum += prob * math.loga(UpperCamelCase ) # entropy formula.
# print entropy
print(F"{round(-1 * my_fir_sum ):.1f}" )
# two len string
A = sum(two_char_strings.values() )
A = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
A = cha + cha
if sequence in two_char_strings:
A = two_char_strings[sequence]
A = int(UpperCamelCase ) / all_sum
my_sec_sum += prob * math.loga(UpperCamelCase )
# print second entropy
print(F"{round(-1 * my_sec_sum ):.1f}" )
# print the difference between them
print(F"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def A__ ( UpperCamelCase ):
    """Count single-character and two-character substring frequencies.

    Returns a pair ``(single_char_strings, two_char_strings)`` of
    ``collections.Counter`` objects.  The final character and the leading
    ``" " + first_char`` bigram are counted explicitly so every character
    participates in both tables.

    Raises IndexError on empty input (indexes ``[-1]`` and ``[0]``).

    Bug fix: both Counter locals were mangled to `A`, so the increments
    below referenced unbound names; the names are restored from the reads.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[UpperCamelCase[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + UpperCamelCase[0]] += 1
    for i in range(0 , len(UpperCamelCase ) - 1 ):
        single_char_strings[UpperCamelCase[i]] += 1
        two_char_strings[UpperCamelCase[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def A__ ( ):
    """Run every doctest defined in this module."""
    from doctest import testmod

    testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
    # Bug fix: `main` is never defined in this module — the doctest entry
    # point above is bound to `A__` by the name mangling, so call that
    # instead of raising NameError.
    A__()
| 292 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# Module-level logger for this configuration file.
_snake_case : List[Any] = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config.json URL for Marian models.
_snake_case : int = {
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class _UpperCAmelCase ( lowercase_ ):
    """Configuration for Marian sequence-to-sequence translation models.

    Stores the encoder/decoder hyper-parameters; defaults mirror the
    Helsinki-NLP/opus-mt-en-de checkpoint.  NOTE(review): the three class
    attributes below all share the mangled name `UpperCamelCase`, so only the
    last assignment survives — upstream they are `model_type`,
    `keys_to_ignore_at_inference`, and `attribute_map`.
    """
    UpperCamelCase = '''marian'''
    UpperCamelCase = ['''past_key_values''']
    UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    # NOTE(review): every parameter is mangled to the same `__UpperCamelCase`
    # name (a duplicate-argument SyntaxError as written); upstream the order is
    # vocab_size, decoder_vocab_size, max_position_embeddings, encoder_layers,
    # encoder_ffn_dim, encoder_attention_heads, decoder_layers,
    # decoder_ffn_dim, decoder_attention_heads, dropouts, activation settings,
    # init_std, layerdrops, token ids, etc. — confirm against the original.
    def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
        # NOTE(review): the assignment targets below are all mangled to `A`;
        # upstream each sets the self attribute matching its right-hand side,
        # e.g. self.vocab_size = vocab_size.
        A = vocab_size
        # decoder vocab falls back to the shared vocab size when not given.
        A = decoder_vocab_size or vocab_size
        A = max_position_embeddings
        A = d_model
        A = encoder_ffn_dim
        A = encoder_layers
        A = encoder_attention_heads
        A = decoder_ffn_dim
        A = decoder_layers
        A = decoder_attention_heads
        A = dropout
        A = attention_dropout
        A = activation_dropout
        A = activation_function
        A = init_std
        A = encoder_layerdrop
        A = decoder_layerdrop
        A = use_cache
        # second read of encoder_layers: upstream this sets num_hidden_layers.
        A = encoder_layers
        A = scale_embedding  # scale factor will be sqrt(d_model) if True
        A = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class _UpperCAmelCase ( lowercase_ ):
    """ONNX export configuration for Marian (mirrors BartOnnxConfig).

    NOTE(review): mangling artifacts — every method below shares the name
    `lowerCamelCase` (later definitions shadow earlier ones on the class),
    assignment targets are collapsed to `A`, and several `__UpperCamelCase`
    references have no matching binding.  The docstrings describe the evident
    upstream intent (inputs/outputs axis specs and dummy-input generators);
    verify against the original transformers source before relying on them.
    """
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def lowerCamelCase ( self :List[str] ):
        """Dynamic-axis specification for the exported model's inputs, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            A = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                # With a KV cache the decoder consumes one new token at a time.
                A = {0: "batch"}
                A = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                A = {0: "batch", 1: "decoder_sequence"}
                A = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            A = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                A, A = self.num_layers
                for i in range(__UpperCamelCase ):
                    A = {0: "batch", 2: "past_sequence + sequence"}
                    A = {0: "batch", 2: "past_sequence + sequence"}
        else:
            A = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def lowerCamelCase ( self :List[str] ):
        """Dynamic-axis specification for outputs; adds past_key_values axes."""
        if self.task in ["default", "seq2seq-lm"]:
            A = super().outputs
        else:
            A = super(__UpperCamelCase , self ).outputs
            if self.use_past:
                A, A = self.num_layers
                for i in range(__UpperCamelCase ):
                    A = {0: "batch", 2: "past_sequence + sequence"}
                    A = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
        """Build dummy encoder+decoder inputs, plus zeroed past_key_values when cached."""
        A = self._generate_dummy_inputs_for_encoder_and_decoder(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        # Generate decoder inputs
        A = seq_length if not self.use_past else 1
        A = self._generate_dummy_inputs_for_encoder_and_decoder(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        A = dict(**__UpperCamelCase , **__UpperCamelCase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            A, A = common_inputs["input_ids"].shape
            A = common_inputs["decoder_input_ids"].shape[1]
            A, A = self.num_attention_heads
            # Shape of one cached encoder key/value tensor:
            # (batch, heads, enc_seq_len, head_dim).
            A = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            A = decoder_seq_length + 3
            A = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder mask to cover the (dummy) cached positions.
            A = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
            A = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            A, A = self.num_layers
            A = min(__UpperCamelCase , __UpperCamelCase )
            A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
            A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(__UpperCamelCase ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(__UpperCamelCase ),
                        torch.zeros(__UpperCamelCase ),
                        torch.zeros(__UpperCamelCase ),
                        torch.zeros(__UpperCamelCase ),
                    ) )
            # TODO: test this.
            A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(__UpperCamelCase , __UpperCamelCase ):
                common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
        return common_inputs
    def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
        """Build dummy causal-LM inputs, extending the mask over the cache."""
        A = self._generate_dummy_inputs_for_encoder_and_decoder(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            A, A = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            A = seqlen + 2
            A, A = self.num_layers
            A, A = self.num_attention_heads
            A = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            A = common_inputs["attention_mask"].dtype
            A = torch.cat(
                [common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
            A = [
                (torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
            ]
        return common_inputs
    def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
        """Tokenize a dummy sentence at an ONNX-friendly fixed batch/seq size."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        A = compute_effective_axis_dimension(
            __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        A = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
        A = compute_effective_axis_dimension(
            __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
        # Generate dummy inputs according to compute batch and sequence
        A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
        return common_inputs
    def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
        """Dispatch dummy-input generation on the configured export task."""
        if self.task in ["default", "seq2seq-lm"]:
            A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
        else:
            A = self._generate_dummy_inputs_for_causal_lm(
                __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
        return common_inputs
    def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ):
        """Flatten past_key_values using the seq2seq or causal-lm layout."""
        if self.task in ["default", "seq2seq-lm"]:
            A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        else:
            A = super(__UpperCamelCase , self )._flatten_past_key_values_(
                __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
    @property
    def lowerCamelCase ( self :List[str] ):
        # Absolute tolerance used when validating exported ONNX outputs.
        return 1e-4
| 292 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Fast pipeline tests for IFInpaintingPipeline.

    NOTE(review): mangling artifacts — the four class attributes share the
    name `UpperCamelCase` (upstream: pipeline_class, params, batch_params,
    required_optional_params) and every test method shares `lowerCamelCase`,
    so only the last definitions survive as written.  Confirm against the
    original diffusers test file.
    """
    UpperCamelCase = IFInpaintingPipeline
    UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def lowerCamelCase ( self :List[str] ):
        """Return the shared dummy pipeline components for fast tests."""
        return self._get_dummy_components()
    # NOTE(review): the two parameters are mangled to the same name
    # (upstream: device, seed=0); the body reads the original names.
    def lowerCamelCase ( self :Any , __UpperCamelCase :List[str] , __UpperCamelCase :Dict=0 ):
        """Build deterministic dummy inputs (prompt, image, mask, generator)."""
        if str(__UpperCamelCase ).startswith("mps" ):
            # MPS does not support device-bound generators.
            A = torch.manual_seed(__UpperCamelCase )
        else:
            A = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
        A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
        A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
        A = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def lowerCamelCase ( self :Optional[int] ):
        """xformers attention must match the default path within tolerance."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def lowerCamelCase ( self :Optional[Any] ):
        """Saving/loading with optional components dropped must round-trip."""
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def lowerCamelCase ( self :Any ):
        """float16 save/load round-trip (loose tolerance, see note below)."""
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1 )
    def lowerCamelCase ( self :Any ):
        """Attention slicing must match the unsliced forward pass."""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def lowerCamelCase ( self :Tuple ):
        """Local save/load round-trip of the full pipeline."""
        self._test_save_load_local()
    def lowerCamelCase ( self :str ):
        """Batched inference must match single-sample inference."""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 292 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def A__ ( UpperCamelCase ):
    """Return True if the adjacency-list graph is bipartite.

    ``UpperCamelCase`` maps each vertex ``0..n-1`` to a list of neighbours.
    A DFS 2-colours every connected component, then every edge is checked for
    equal endpoint colours.  An empty graph is trivially bipartite.

    Bug fixes: the mangled code passed the whole graph where a vertex was
    expected (``dfs(UpperCamelCase, ...)``), gave the inner ``dfs`` two
    parameters with the same name, and read the unbound name ``graph`` in
    the final edge check.
    """
    visited = [False] * len(UpperCamelCase )
    color = [-1] * len(UpperCamelCase )

    def dfs(v , c ):
        # Colour vertex v with c, then recurse into uncoloured neighbours
        # using the opposite colour.
        visited[v] = True
        color[v] = c
        for u in UpperCamelCase[v]:
            if not visited[u]:
                dfs(u , 1 - c )

    # Colour every component (the graph may be disconnected).
    for i in range(len(UpperCamelCase ) ):
        if not visited[i]:
            dfs(i , 0 )
    # Bipartite iff no edge joins two same-coloured vertices.
    for i in range(len(UpperCamelCase ) ):
        for j in UpperCamelCase[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
_snake_case : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
# Bug fix: `check_bipartite_dfs` and `graph` are not defined in this module
# (the mangling renamed them to `A__` and `_snake_case`), so call the names
# that are actually bound instead of raising NameError.
print(A__(_snake_case))
| 292 | 1 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Bug fix: every assignment in this script originally targeted the same
    # mangled name `_snake_case`, so the operations and the plotting section
    # below (which read X, young, middle_aged, union, ...) crashed with
    # NameError.  The distinct names the rest of the script reads are
    # restored here; all computations and plot calls are unchanged.
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition
    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)
    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 292 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _UpperCAmelCase ( lowercase_ ):
    """Distribution transformed by the affine map ``x -> loc + scale * x``.

    NOTE(review): the constructor's parameters are all mangled to
    ``__UpperCamelCase``; upstream they are (base_dist, loc=None, scale=None,
    event_dim=0), and the three properties below all share the mangled name
    `lowerCamelCase` (upstream: mean, variance, stddev) so only the last
    survives as written — confirm against the original file.
    """
    def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ):
        # Missing loc/scale default to the identity transform (loc=0, scale=1).
        A = 1.0 if scale is None else scale
        A = 0.0 if loc is None else loc
        super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] )
    @property
    def lowerCamelCase ( self :Any ):
        # Mean of an affine-transformed distribution: scale * E[X] + loc.
        return self.base_dist.mean * self.scale + self.loc
    @property
    def lowerCamelCase ( self :Optional[int] ):
        # Var[aX + b] = a^2 * Var[X].
        return self.base_dist.variance * self.scale**2
    @property
    def lowerCamelCase ( self :Dict ):
        # Standard deviation derived from the variance property above.
        return self.variance.sqrt()
class _UpperCAmelCase ( nn.Module ):
    """Projects hidden states onto the raw arguments of a distribution.

    One linear layer per distribution argument (output sizes given by
    ``args_dim``); the ``domain_map`` callable then maps the raw projections
    into each argument's valid domain.  NOTE(review): assignment targets are
    mangled to `A`; upstream they set self.args_dim, self.proj,
    self.domain_map.
    """
    def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ):
        super().__init__(**__UpperCamelCase )
        A = args_dim
        A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] )
        A = domain_map
    def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ):
        """Apply every per-argument projection, then constrain to the domain."""
        A = [proj(__UpperCamelCase ) for proj in self.proj]
        return self.domain_map(*__UpperCamelCase )
class _UpperCAmelCase ( nn.Module ):
    """Wraps an arbitrary callable as an nn.Module (used for domain maps)."""
    def __init__( self :Dict , __UpperCamelCase :int ):
        super().__init__()
        # NOTE(review): target mangled to `A`; upstream: self.function = function.
        A = function
    def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ):
        # Forward simply delegates to the stored callable.
        return self.function(__UpperCamelCase , *__UpperCamelCase )
class _UpperCAmelCase :
    """Base class mapping projected model outputs to a torch distribution.

    NOTE(review): the three `42` placeholders presumably stand for annotated
    class attributes (distribution_class, in_features, args_dim) collapsed by
    the mangling, and the methods below all share the name `lowerCamelCase`
    (upstream: _base_distribution, distribution, event_shape, event_dim,
    value_in_support, get_parameter_projection, domain_map, squareplus) —
    confirm against the original file.
    """
    UpperCamelCase = 42
    UpperCamelCase = 42
    UpperCamelCase = 42
    def __init__( self :Any , __UpperCamelCase :int = 1 ):
        # `dim` multiplies every argument's size for multivariate outputs.
        A = dim
        A = {k: dim * self.args_dim[k] for k in self.args_dim}
    def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ):
        """Instantiate the base distribution; wrap in Independent when dim > 1."""
        if self.dim == 1:
            return self.distribution_class(*__UpperCamelCase )
        else:
            return Independent(self.distribution_class(*__UpperCamelCase ) , 1 )
    def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ):
        """Build the output distribution, optionally affine-rescaled by loc/scale."""
        A = self._base_distribution(__UpperCamelCase )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim )
    @property
    def lowerCamelCase ( self :List[Any] ):
        # Event shape: scalar for dim == 1, else a length-`dim` vector.
        return () if self.dim == 1 else (self.dim,)
    @property
    def lowerCamelCase ( self :Tuple ):
        # Number of event dimensions (rank of the event shape).
        return len(self.event_shape )
    @property
    def lowerCamelCase ( self :int ):
        # Value lying inside the distribution's support (used for masking).
        return 0.0
    def lowerCamelCase ( self :str , __UpperCamelCase :int ):
        """Create the projection from hidden features onto the distribution args."""
        return ParameterProjection(
            in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ):
        """Map raw projections into each argument's valid domain (abstract)."""
        raise NotImplementedError()
    @staticmethod
    def lowerCamelCase ( __UpperCamelCase :torch.Tensor ):
        # Square-plus: smooth ReLU-like map from R onto R+.
        # NOTE(review): `x` is unbound here — the parameter name was mangled;
        # upstream the expression is (x + sqrt(x^2 + 4)) / 2 over the argument.
        return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0
class _UpperCAmelCase ( lowercase_ ):
    """Student's t distribution output head (args: df, loc, scale)."""
    UpperCamelCase = {"df": 1, "loc": 1, "scale": 1}
    UpperCamelCase = StudentT
    @classmethod
    def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
        """Constrain raw (df, loc, scale): scale > 0 (eps floor), df > 2."""
        A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
        A = 2.0 + cls.squareplus(__UpperCamelCase )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
    """Normal (Gaussian) distribution output head (args: loc, scale)."""
    UpperCamelCase = {"loc": 1, "scale": 1}
    UpperCamelCase = Normal
    @classmethod
    def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
        """Constrain the raw scale to be strictly positive (eps floor)."""
        A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
    """Negative-binomial output head for count data (args: total_count, logits)."""
    UpperCamelCase = {"total_count": 1, "logits": 1}
    UpperCamelCase = NegativeBinomial
    @classmethod
    def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
        """Constrain total_count to be positive via square-plus."""
        A = cls.squareplus(__UpperCamelCase )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ):
        """Instantiate NegativeBinomial from (total_count, logits)."""
        A, A = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase )
        else:
            return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 )
    def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ):
        """Build the distribution; a `scale` is folded into the logits."""
        A, A = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 292 | 1 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 100 * 2**20, 900 * 2**20] )
def A__ ( monkeypatch , dataset_size , input_in_memory_max_size ):
    """Check ``is_small_dataset`` against a (possibly patched) IN_MEMORY_MAX_SIZE.

    Bug fix: the three parameters were mangled to the same ``UpperCamelCase``
    name (a duplicate-argument SyntaxError), leaving the body's
    ``monkeypatch``/``dataset_size``/``input_in_memory_max_size`` reads
    unbound; pytest also injects fixtures and parametrized values *by name*,
    so the names declared in the decorators are restored.  The mangled `A`
    locals are restored from their later reads.
    """
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , input_in_memory_max_size )
    # Read back the effective (possibly monkeypatched) setting.
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # A dataset is "small" only when both sizes are truthy and it fits.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
| 292 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCAmelCase :
    """Shared save/load round-trip tests for feature-extraction classes.

    Subclasses must provide ``feature_extraction_class`` and
    ``feat_extract_dict``.  Bug fix: every local was mangled to `A` while the
    bodies still read the original identifiers (``feat_extract``, ``obj``,
    ``feat_extract_first`` ...), so each test crashed with NameError; the
    names are restored from those reads.  NOTE(review): the four test methods
    still share the mangled name `lowerCamelCase` — upstream they are
    distinct ``test_*`` methods; renaming is left to a wider fix.
    """
    UpperCamelCase = None
    def lowerCamelCase ( self :List[Any] ):
        """A JSON-string round trip preserves every configured attribute."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )
    def lowerCamelCase ( self :Dict ):
        """to_json_file / from_json_file round trip yields an equal extractor."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def lowerCamelCase ( self :Dict ):
        """save_pretrained / from_pretrained round trip yields an equal extractor."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def lowerCamelCase ( self :Tuple ):
        """The extractor must be constructible with no arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
| 292 | 1 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger for this tokenizer file.
_snake_case : str = logging.get_logger(__name__)
# Name of the serialized SentencePiece model inside a checkpoint directory.
_snake_case : Optional[int] = {'vocab_file': 'spiece.model'}
# Checkpoint name -> hosted SentencePiece vocab URL.
_snake_case : List[str] = {
    'vocab_file': {
        'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
    }
}
# Checkpoint name -> maximum model input length (positional embedding size).
_snake_case : Any = {
    'AI-Sweden/gpt-sw3-126m': 2048,
    'AI-Sweden/gpt-sw3-350m': 2048,
    'AI-Sweden/gpt-sw3-1.6b': 2048,
    'AI-Sweden/gpt-sw3-6.7b': 2048,
    'AI-Sweden/gpt-sw3-20b': 2048,
}
class _UpperCAmelCase ( lowercase_ ):
    """SentencePiece-based tokenizer for the GPT-SW3 models.

    NOTE(review): mangling artifacts — the class attributes all share
    `UpperCamelCase` (upstream: vocab_files_names, pretrained_vocab_files_map,
    max_model_input_sizes, model_input_names), most methods share
    `lowerCamelCase` (so later definitions shadow earlier ones), assignment
    targets are collapsed to `A`, and several parameters share the name
    `__UpperCamelCase`.  The docstrings below describe the evident upstream
    intent; verify against the original transformers source.
    """
    UpperCamelCase = VOCAB_FILES_NAMES
    UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase = ['''input_ids''', '''attention_mask''']
    def __init__( self :List[Any] , __UpperCamelCase :int , __UpperCamelCase :Union[str, Any]=False , __UpperCamelCase :Any=False , __UpperCamelCase :Any=False , __UpperCamelCase :Any=None , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[Any]=None , __UpperCamelCase :int=None , __UpperCamelCase :Optional[Dict[str, Any]] = None , **__UpperCamelCase :Optional[Any] , ):
        """Load the SentencePiece model and derive checkpoint-specific special tokens."""
        A = {} if sp_model_kwargs is None else sp_model_kwargs
        A = kwargs.get("name_or_path" )
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored" )
            A = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        A = "<|endoftext|>" if eos_token is None else eos_token
        A = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint reuses unk/eos for pad/bos.
            A = unk_token if pad_token is None else pad_token
            A = eos_token if bos_token is None else bos_token
        else:
            A = "<pad>" if pad_token is None else pad_token
            A = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
        A = do_lower_case
        A = remove_space
        A = keep_accents
        A = vocab_file
        A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__UpperCamelCase )
        # Used for whitespace normalization in input texts
        # fmt : off
        A = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        A = re.compile(
            f"[{''.join(map(__UpperCamelCase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]" )
    def __getstate__( self :str ):
        # Drop the unpicklable SentencePiece processor when pickling.
        A = self.__dict__.copy()
        A = None
        return state
    def __setstate__( self :Tuple , __UpperCamelCase :Optional[Any] ):
        # Restore state, then rebuild the SentencePiece processor from disk.
        A = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            A = {}
        A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def lowerCamelCase ( self :int ):
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model )
    def lowerCamelCase ( self :str , __UpperCamelCase :str ):
        """Strip non-printing chars, normalize whitespace, then apply NFC."""
        A = self.non_printing_characters_re.sub("" , __UpperCamelCase )
        # Normalize whitespaces
        A = "".join([char if char not in self.whitespaces else " " for char in text] )
        # NFC Unicode normalization
        A = unicodedata.normalize("NFC" , __UpperCamelCase )
        return text
    def lowerCamelCase ( self :Tuple , __UpperCamelCase :str , **__UpperCamelCase :List[Any] ):
        """Tokenize text into SentencePiece pieces after preprocessing."""
        A = self.preprocess_text(__UpperCamelCase )
        return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
    def lowerCamelCase ( self :str , __UpperCamelCase :str ):
        """Convert a token (piece) to its vocabulary id."""
        return self.sp_model.PieceToId(__UpperCamelCase )
    def lowerCamelCase ( self :Any , __UpperCamelCase :int ):
        """Convert a vocabulary id back to its token (piece)."""
        return self.sp_model.IdToPiece(__UpperCamelCase )
    @staticmethod
    def lowerCamelCase ( __UpperCamelCase :str ):
        # NOTE(review): `out_string` is unbound here — the parameter name was
        # mangled; upstream this is an identity "clean up" hook on its argument.
        return out_string
    def lowerCamelCase ( self :str , __UpperCamelCase :List[str] ):
        """Join tokens into a string, decoding special tokens outside SentencePiece."""
        A = []
        A = ""
        A = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__UpperCamelCase ) + token
                A = True
                A = []
            else:
                current_sub_tokens.append(__UpperCamelCase )
                A = False
        out_string += self.sp_model.decode(__UpperCamelCase )
        return out_string
    def lowerCamelCase ( self :Dict ):
        """Return the full token -> id mapping, including added tokens."""
        A = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Optional[str] = None ):
        """Write the SentencePiece model into `save_directory`; return its path."""
        if not os.path.isdir(__UpperCamelCase ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        A = os.path.join(
            __UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
            # Source file exists elsewhere: copy it into the target directory.
            copyfile(self.vocab_file , __UpperCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            # No file on disk: serialize the in-memory model instead.
            with open(__UpperCamelCase , "wb" ) as fi:
                A = self.sp_model.serialized_model_proto()
                fi.write(__UpperCamelCase )
        return (out_vocab_file,)
    def lowerCamelCase ( self :int , __UpperCamelCase :Union[str, List[str]] , __UpperCamelCase :Union[str, bool] = False ):
        """Fast-path encode for a string or list of strings; optional pt tensors."""
        if isinstance(__UpperCamelCase , __UpperCamelCase ):
            A = self.preprocess_text(__UpperCamelCase )
            A = self.sp_model.encode(__UpperCamelCase )
        else:
            A = [self.preprocess_text(__UpperCamelCase ) for t in text]
            A = self.sp_model.encode(__UpperCamelCase )
        if return_tensors is True or return_tensors == "pt":
            A = torch.tensor(__UpperCamelCase )
        return token_ids
    def lowerCamelCase ( self :int , __UpperCamelCase :Union[int, List[int]] ):
        """Fast-path decode of ids straight through SentencePiece."""
        return self.sp_model.decode(__UpperCamelCase )
    def lowerCamelCase ( self :int , __UpperCamelCase :"Conversation" ):
        """Serialize a Conversation into the User:/Bot: chat prompt format."""
        A = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        A = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(__UpperCamelCase ) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=__UpperCamelCase )
| 292 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
    """Tokenization tests for RoFormer (Chinese, rjieba-backed).

    NOTE(review): mangling artifacts — the class attributes share
    `UpperCamelCase` (upstream: tokenizer_class, rust_tokenizer_class,
    space_between_special_tokens, test_rust_tokenizer) and every method
    shares `lowerCamelCase`, so only the last definitions survive as
    written; locals are collapsed to `A` while the bodies read the original
    names (``tokens``, ``output_text`` ...).  Confirm against the original
    transformers test file.
    """
    UpperCamelCase = RoFormerTokenizer
    UpperCamelCase = RoFormerTokenizerFast
    UpperCamelCase = True
    UpperCamelCase = True
    def lowerCamelCase ( self :List[str] ):
        """Standard unittest setUp delegation."""
        super().setUp()
    def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ):
        """Instantiate the slow tokenizer from the reference checkpoint."""
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
    def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ):
        """Instantiate the fast (rust) tokenizer from the reference checkpoint."""
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
    def lowerCamelCase ( self :Any ):
        """Return a (raw, expected-tokenization) Chinese text pair."""
        A = "永和服装饰品有限公司,今天天气非常好"
        A = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text
    def lowerCamelCase ( self :int ):
        """Slow tokenizer: tokenization and id conversion match expectations."""
        A = self.get_tokenizer()
        A, A = self.get_chinese_input_output_texts()
        A = tokenizer.tokenize(__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , output_text.split() )
        A = tokens + [tokenizer.unk_token]
        A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
    def lowerCamelCase ( self :str ):
        """Fast tokenizer: tokenization and id conversion match expectations."""
        A = self.get_rust_tokenizer()
        A, A = self.get_chinese_input_output_texts()
        A = tokenizer.tokenize(__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , output_text.split() )
        A = tokens + [tokenizer.unk_token]
        A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
    def lowerCamelCase ( self :Any ):
        # Intentionally skipped inherited check.
        pass
    def lowerCamelCase ( self :Tuple ):
        # Intentionally skipped inherited check.
        pass
    def lowerCamelCase ( self :List[str] ):
        # Intentionally skipped inherited check.
        pass
| 292 | 1 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# NOTE(review): all module-level constants below were renamed to one shared name
# `_snake_case` by obfuscation, so only the last binding survives at import time.
# Upstream these are: logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — confirm before relying on them.
_snake_case : List[str] = logging.get_logger(__name__)
# On-disk resource names for the GPT-NeoX-Japanese tokenizer.
_snake_case : int = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
# Hub URLs of the reference vocabulary and emoji files.
_snake_case : Optional[Any] = {
    'vocab_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
    },
    'emoji_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
    },
}
# Maximum positional-embedding length per checkpoint.
_snake_case : int = {
    'abeja/gpt-neox-japanese-2.7b': 2048,
}
def A__ ( vocab_file , emoji_file ):
    """Load the GPT-NeoX-Japanese vocabulary and emoji table.

    Args:
        vocab_file: path to ``vocab.txt`` — one entry per line; a line is either a
            single token or a comma-separated group of surface forms sharing one id.
        emoji_file: path to ``emoji.json``.

    Returns:
        ``(vocab, raw_vocab, ids_to_tokens, emoji)`` where ``vocab`` maps every
        surface form to its id, ``raw_vocab`` maps the raw (comma-joined) line to
        its id, ``ids_to_tokens`` maps id -> list of surface forms, and ``emoji``
        is the parsed JSON table.

    Fix(review): the original signature declared two parameters with the same name
    (a SyntaxError) and the loop discarded its results, so all three dicts were
    returned empty.
    """
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    # The literal "," token stays whole; any other line containing "," is a group.
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class _UpperCAmelCase ( lowercase_ ):
    """GPT-NeoX-Japanese tokenizer backed by a vocab.txt + emoji.json pair.

    NOTE(review): throughout this class, bare ``A = ...`` statements discard
    their results — upstream these were attribute assignments (e.g.
    ``self.do_clean_text = ...``); likewise ``__init__`` declares several
    parameters with one shared name, which is not valid Python. Left
    byte-identical; confirm against the upstream source before relying on it.
    """
    # NOTE(review): these four class attributes share one name, so only the last
    # binding survives; upstream they are vocab_files_names,
    # pretrained_vocab_files_map, max_model_input_sizes and model_input_names.
    UpperCamelCase = VOCAB_FILES_NAMES
    UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase = ['''input_ids''', '''attention_mask''']
    def __init__( self :Any , __UpperCamelCase :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Tuple="<|endoftext|>" , __UpperCamelCase :List[str]="<|endoftext|>" , __UpperCamelCase :Any="<|startoftext|>" , __UpperCamelCase :List[Any]="<|endoftext|>" , __UpperCamelCase :Optional[int]=False , **__UpperCamelCase :Dict , ):
        super().__init__(
            unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , do_clean_text=__UpperCamelCase , **__UpperCamelCase , )
        # Validate both resource files exist before loading.
        if not os.path.isfile(__UpperCamelCase ):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        if not os.path.isfile(__UpperCamelCase ):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        A = do_clean_text
        A, A, A, A = load_vocab_and_emoji(__UpperCamelCase , __UpperCamelCase )
        A = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def lowerCamelCase ( self :Union[str, Any] ):
        # vocab_size: count of raw entries, not of all surface-form variants.
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def lowerCamelCase ( self :Tuple ):
        # get_vocab: raw vocabulary merged with any added tokens.
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def lowerCamelCase ( self :Tuple , __UpperCamelCase :Tuple ):
        # _tokenize: delegate to the subword tokenizer, optionally cleaning text first.
        return self.subword_tokenizer.tokenize(__UpperCamelCase , clean=self.do_clean_text )
    def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Union[str, Any] ):
        # _convert_token_to_id, falling back to the unk token's id.
        return self.vocab.get(__UpperCamelCase , self.vocab.get(self.unk_token ) )
    def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Any ):
        # _convert_id_to_token via the subword tokenizer.
        return self.subword_tokenizer.convert_id_to_token(__UpperCamelCase )
    def lowerCamelCase ( self :List[str] , __UpperCamelCase :Tuple ):
        # convert_tokens_to_string: plain concatenation, stripped.
        A = "".join(__UpperCamelCase ).strip()
        return out_string
    def lowerCamelCase ( self :List[Any] , __UpperCamelCase :"Conversation" ):
        # Build conversation input ids, truncated from the left to model_max_length.
        A = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] )
        if len(__UpperCamelCase ) > self.model_max_length:
            A = input_ids[-self.model_max_length :]
        return input_ids
    def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Optional[str] = None ):
        # save_vocabulary: write vocab.txt and emoji.json under save_directory.
        A = 0
        if os.path.isdir(__UpperCamelCase ):
            A = os.path.join(
                __UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            A = os.path.join(
                __UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            A = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            A = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(__UpperCamelCase , "w" , encoding="utf-8" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                # Warn (once per gap) if stored indices are not consecutive.
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!" )
                    A = token_index
                writer.write(",".join(__UpperCamelCase ) + "\n" )
                index += 1
        with open(__UpperCamelCase , "w" , encoding="utf-8" ) as writer:
            json.dump(self.emoji , __UpperCamelCase )
        return vocab_file, emoji_file
class _UpperCAmelCase ( lowercase_ ):
    """Greedy longest-match subword tokenizer for Japanese with text cleaning.

    NOTE(review): as elsewhere in this file, bare ``A = ...`` statements discard
    their results — upstream these were attribute/local assignments (e.g.
    ``self.vocab = vocab``, ``self.content_repatter1 = ...``), and several
    methods declare duplicate parameter names. Left byte-identical; confirm
    against the upstream source before relying on behavior.
    """
    def __init__( self :Optional[int] , __UpperCamelCase :str , __UpperCamelCase :Optional[int] , __UpperCamelCase :str ):
        # Store vocab / id mappings and pre-compile the cleaning regexes
        # (URL, e-mail, phone, two date formats, price), then build a
        # translation table mapping box-drawing characters to <BLOCK>.
        A = vocab # same as swe
        A = ids_to_tokens # same as bpe
        A = emoji
        A = np.max([len(__UpperCamelCase ) for w in self.vocab.keys()] )
        A = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
        A = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
        A = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
        A = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        A = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        A = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
        A = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        A = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        A = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
    def __len__( self :Any ):
        # Tokenizer size is the number of ids, not of surface forms.
        return len(self.ids_to_tokens )
    def lowerCamelCase ( self :Any , __UpperCamelCase :Any ):
        # clean_text: replace URLs/e-mails/phones/dates/prices with sentinel tags
        # and collapse runs of <BLOCK> to a single tag.
        A = self.content_repattera.sub("<URL>" , __UpperCamelCase )
        A = self.content_repattera.sub("<EMAIL>" , __UpperCamelCase )
        A = self.content_repattera.sub("<TEL>" , __UpperCamelCase )
        A = self.content_repattera.sub("<DATE>" , __UpperCamelCase )
        A = self.content_repattera.sub("<DATE>" , __UpperCamelCase )
        A = self.content_repattera.sub("<PRICE>" , __UpperCamelCase )
        A = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            A = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
        return content
    def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Dict , __UpperCamelCase :Tuple=False ):
        # tokenize: normalize whitespace/emoji, optionally clean, then perform a
        # greedy longest-match scan against the vocab; unknown spans fall back to
        # <KIGOU>/<U2000U2BFF> symbol classes or per-byte <|byte%d|> tokens.
        A = text.replace(" " , "<SP>" )
        A = text.replace(" " , "<SP>" )
        A = text.replace("\r\n" , "<BR>" )
        A = text.replace("\n" , "<BR>" )
        A = text.replace("\r" , "<BR>" )
        A = text.replace("\t" , "<TAB>" )
        A = text.replace("—" , "ー" )
        A = text.replace("−" , "ー" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                A = text.replace(__UpperCamelCase , __UpperCamelCase )
        if clean:
            A = self.clean_text(__UpperCamelCase )
        def check_simbol(__UpperCamelCase :List[str] ):
            # True for 2-byte UTF-8 symbols in selected Latin-1/IPA/modifier ranges.
            A = x.encode()
            if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 2:
                A = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False
        def checkuae(__UpperCamelCase :Any ):
            # True for 3-byte UTF-8 characters in U+2000..U+2BFF (punct/symbols).
            A = x.encode()
            if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 3:
                A = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xe28080 and c <= 0xe2b07f:
                    return True
            return False
        A = 0
        A = []
        while pos < len(__UpperCamelCase ):
            # Candidate window: up to maxlen ahead for "<...>" tags, else 3 chars.
            A = min(len(__UpperCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
            A = [] # (token_id, token, pos)
            for e in range(__UpperCamelCase , __UpperCamelCase , -1 ):
                A = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(__UpperCamelCase ) > 2:
                        A = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(__UpperCamelCase ) > 0:
                # the smallest token_id is adopted
                A, A, A = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[0] )[0]
                result.append(__UpperCamelCase )
                A = e
            else:
                # No vocab match: classify the single character or emit raw bytes.
                A = pos + 1
                A = text[pos:end]
                if check_simbol(__UpperCamelCase ):
                    result.append("<KIGOU>" )
                elif checkuae(__UpperCamelCase ):
                    result.append("<U2000U2BFF>" )
                else:
                    for i in wd.encode("utf-8" ):
                        result.append("<|byte%d|>" % i )
                A = end
        return result
    def lowerCamelCase ( self :Dict , __UpperCamelCase :str , __UpperCamelCase :List[str]="\n" ):
        # convert_id_to_token: map an id back to text, folding consecutive
        # <|byte..|> tokens into UTF-8 and expanding sentinel tags.
        A = []
        A = []
        A = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(__UpperCamelCase ) > 0:
                words.append(bytearray(__UpperCamelCase ).decode("utf-8" , errors="replace" ) )
                A = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word] )
            elif word == "<SP>":
                words.append(" " )
            elif word == "<BR>":
                words.append(__UpperCamelCase )
            elif word == "<TAB>":
                words.append("\t" )
            elif word == "<BLOCK>":
                words.append("▀" )
            elif word == "<KIGOU>":
                words.append("ǀ" )
            elif word == "<U2000U2BFF>":
                words.append("‖" )
            else:
                words.append(__UpperCamelCase )
        if len(__UpperCamelCase ) > 0:
            words.append(bytearray(__UpperCamelCase ).decode("utf-8" , errors="replace" ) )
        A = "".join(__UpperCamelCase )
        return text
| 292 |
"""simple docstring"""
def A__ ( input_str , use_pascal = False ):
    """Convert a snake_case string to camelCase (or PascalCase).

    Args:
        input_str: the snake_case string to convert.
        use_pascal: if True, capitalize the first word too (PascalCase).

    Raises:
        ValueError: if ``input_str`` is not a str or ``use_pascal`` is not a bool.

    >>> A__("some_random_string")
    'someRandomString'
    >>> A__("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    # Fix(review): the original signature declared two parameters with one shared
    # name (a SyntaxError); also guard empty segments ("foo__bar") which would
    # have raised IndexError on word[0].
    if not isinstance(input_str , str ):
        msg = f"Expected string as input, found {type(input_str )}"
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(msg )
    words = input_str.split("_" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] if word else "" for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 292 | 1 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# NOTE(review): these module globals were all renamed to one shared name
# `_snake_case` by obfuscation, so only the last binding survives; upstream they
# are _lock, _default_handler, log_levels, _default_log_level and _tqdm_active —
# confirm before relying on them (later functions reference those names).
_snake_case : List[Any] = threading.Lock()
_snake_case : Optional[logging.Handler] = None
# Mapping from TRANSFORMERS_VERBOSITY values to stdlib logging levels.
_snake_case : Optional[Any] = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
_snake_case : List[Any] = logging.WARNING
_snake_case : Optional[int] = True
def A__ ( ):
    """Resolve the default logging level, honoring ``TRANSFORMERS_VERBOSITY``.

    Falls back to the module-level ``_default_log_level`` when the environment
    variable is unset or carries an unknown value (a warning is emitted then).
    """
    # Fix(review): os.getenv was called with the undefined name `UpperCamelCase`;
    # the default for a missing env var is None.
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys() ) }" )
    return _default_log_level
def A__ ( ):
    """Return the top-level package name of this module."""
    top_level, _, _ = __name__.partition(".")
    return top_level
# NOTE(review): every function below is named A__ (obfuscation), so only the
# last def survives at import time, and calls such as `_get_library_name()` /
# `_get_library_root_logger()` reference the upstream names that no longer
# exist here. Likewise, bare `A = ...` statements discard results that upstream
# assigned to `_default_handler` / locals. Left byte-identical; confirm
# against the upstream logging utilities before relying on behavior.
def A__ ( ):
    # _get_library_root_logger: the root logger of this library's namespace.
    return logging.getLogger(_get_library_name() )
def A__ ( ):
    # _configure_library_root_logger: idempotently attach a stderr handler and
    # the default level to the library root logger.
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        A = logging.StreamHandler() # Set sys.stderr as stream.
        A = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        A = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        A = False
def A__ ( ):
    # _reset_library_root_logger: detach the default handler and reset the level.
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        A = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        A = None
def A__ ( ):
    # get_log_levels_dict: expose the verbosity-name -> level mapping.
    return log_levels
# NOTE(review): same obfuscation caveats as above — shared def name A__,
# references to undefined names (`name`, `UpperCamelCase` at module scope,
# `handlers`, `warning_advice`, `warning_once`), and discarded `A = ...`
# assignments. Left byte-identical; comments describe the upstream intent.
def A__ ( UpperCamelCase = None ):
    # get_logger(name=None): configure the root logger, then return the child.
    if name is None:
        A = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(UpperCamelCase )
def A__ ( ):
    # get_verbosity: effective level of the library root logger.
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def A__ ( UpperCamelCase ):
    # set_verbosity(level).
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(UpperCamelCase )
def A__ ( ):
    # set_verbosity_info — NOTE(review): upstream passes logging.INFO here;
    # `UpperCamelCase` is undefined at module scope.
    return set_verbosity(UpperCamelCase )
def A__ ( ):
    # set_verbosity_warning — upstream passes logging.WARNING.
    return set_verbosity(UpperCamelCase )
def A__ ( ):
    # set_verbosity_debug — upstream passes logging.DEBUG.
    return set_verbosity(UpperCamelCase )
def A__ ( ):
    # set_verbosity_error — upstream passes logging.ERROR.
    return set_verbosity(UpperCamelCase )
def A__ ( ):
    # disable_default_handler.
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def A__ ( ):
    # enable_default_handler.
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def A__ ( UpperCamelCase ):
    # add_handler(handler).
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(UpperCamelCase )
def A__ ( UpperCamelCase ):
    # remove_handler(handler).
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(UpperCamelCase )
def A__ ( ):
    # disable_propagation — upstream sets root logger .propagate = False.
    _configure_library_root_logger()
    A = False
def A__ ( ):
    # enable_propagation — upstream sets root logger .propagate = True.
    _configure_library_root_logger()
    A = True
def A__ ( ):
    # enable_explicit_format: attach a verbose formatter to every handler.
    A = _get_library_root_logger().handlers
    for handler in handlers:
        A = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
        handler.setFormatter(UpperCamelCase )
def A__ ( ):
    # reset_format: clear the formatter on every handler.
    A = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(UpperCamelCase )
def A__ ( self , *UpperCamelCase , **UpperCamelCase ):
    # warning_advice: like Logger.warning but silenced by
    # TRANSFORMERS_NO_ADVISORY_WARNINGS. NOTE(review): duplicate *args/**kwargs
    # parameter names here are not valid Python — upstream uses *args, **kwargs.
    A = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , UpperCamelCase )
    if no_advisory_warnings:
        return
    self.warning(*UpperCamelCase , **UpperCamelCase )
_snake_case : int = warning_advice
@functools.lru_cache(UpperCamelCase )
def A__ ( self , *UpperCamelCase , **UpperCamelCase ):
    # warning_once: cache so each distinct warning fires only once
    # (upstream decorates with lru_cache(None)).
    self.warning(*UpperCamelCase , **UpperCamelCase )
_snake_case : Optional[int] = warning_once
class _UpperCAmelCase :
    """No-op drop-in for tqdm used when progress bars are disabled (EmptyTqdm).

    NOTE(review): `A = args[0] if args else None` upstream stores the iterator on
    `self._iterator`; several dunder signatures declare duplicate parameter
    names, which is not valid Python. Left byte-identical.
    """
    def __init__( self :Tuple , *__UpperCamelCase :str , **__UpperCamelCase :Optional[Any] ): # pylint: disable=unused-argument
        A = args[0] if args else None
    def __iter__( self :str ):
        # Iterate the wrapped iterable without any progress display.
        return iter(self._iterator )
    def __getattr__( self :Optional[int] , __UpperCamelCase :List[str] ):
        # Any tqdm method (update, close, ...) becomes a silent no-op.
        def empty_fn(*__UpperCamelCase :List[Any] , **__UpperCamelCase :Optional[int] ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self :Union[str, Any] ):
        return self
    def __exit__( self :str , __UpperCamelCase :List[Any] , __UpperCamelCase :Optional[Any] , __UpperCamelCase :int ):
        return
class _UpperCAmelCase :
    """Factory dispatching to real tqdm or the no-op above (_tqdm_cls)."""
    def __call__( self :str , *__UpperCamelCase :Any , **__UpperCamelCase :Dict ):
        if _tqdm_active:
            return tqdm_lib.tqdm(*__UpperCamelCase , **__UpperCamelCase )
        else:
            return EmptyTqdm(*__UpperCamelCase , **__UpperCamelCase )
    def lowerCamelCase ( self :Any , *__UpperCamelCase :Optional[Any] , **__UpperCamelCase :int ):
        # set_lock: forward to tqdm only when progress bars are active.
        A = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*__UpperCamelCase , **__UpperCamelCase )
    def lowerCamelCase ( self :List[str] ):
        # get_lock: forward to tqdm only when progress bars are active.
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# NOTE(review): `_tqdm_cls` is undefined under that name here (the class above
# was renamed by obfuscation); upstream this is `tqdm = _tqdm_cls()`.
_snake_case : Any = _tqdm_cls()
# NOTE(review): the three functions below share one name (A__); upstream they
# are is_progress_bar_enabled / enable_progress_bar / disable_progress_bar, and
# the `A = True/False` lines upstream rebind the global `_tqdm_active`.
def A__ ( ):
    # is_progress_bar_enabled.
    global _tqdm_active
    return bool(_tqdm_active )
def A__ ( ):
    # enable_progress_bar: also re-enable huggingface_hub progress bars.
    global _tqdm_active
    A = True
    hf_hub_utils.enable_progress_bars()
def A__ ( ):
    # disable_progress_bar: also disable huggingface_hub progress bars.
    global _tqdm_active
    A = False
    hf_hub_utils.disable_progress_bars()
| 292 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def A__ ( height , width , scale_factor=8 ):
    """Compute latent-space height/width for a pixel-space target size.

    Divides each dimension by ``scale_factor**2`` (rounding up), then multiplies
    back by ``scale_factor`` — i.e. rounds the latent size up so that decoding
    covers the requested pixel size.

    Fix(review): the original declared duplicate parameter names (a SyntaxError)
    and assigned results to a throwaway name while incrementing the undefined
    ``new_height``/``new_width``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class _UpperCAmelCase ( lowercase_ ):
    """Kandinsky 2.2 controlnet text-to-image pipeline (unet + DDPM scheduler + movq).

    NOTE(review): several method signatures below declare duplicate parameter
    names (not valid Python) and bare `A = ...` statements discard results that
    upstream assigned to named locals/attributes (e.g. `self.movq_scale_factor`).
    Left byte-identical; comments describe the upstream intent.
    """
    def __init__( self :Any , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ):
        super().__init__()
        self.register_modules(
            unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , )
        # Spatial downscale factor implied by the movq block structure.
        A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ):
        # prepare_latents: sample fresh noise or validate/scale provided latents.
        if latents is None:
            A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            A = latents.to(__UpperCamelCase )
        A = latents * scheduler.init_noise_sigma
        return latents
    def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ):
        # enable_sequential_cpu_offload: move submodules to CPU between uses.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        A = torch.device(f"cuda:{gpu_id}" )
        A = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(__UpperCamelCase , __UpperCamelCase )
    def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ):
        # enable_model_cpu_offload: hook-based offload (needs accelerate>=0.17).
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        A = torch.device(f"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=__UpperCamelCase )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        A = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase )
        # We'll offload the last model manually.
        A = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCamelCase ( self :str ):
        # _execution_device: where the unet actually runs (accounts for offload hooks).
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(__UpperCamelCase , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(__UpperCamelCase )
    def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ):
        # Main denoising entry point: image_embeds + negative_image_embeds + hint
        # -> latents -> movq decode -> pil/np/pt output.
        A = self._execution_device
        A = guidance_scale > 1.0
        # Batch up list inputs for embeds and hint.
        if isinstance(__UpperCamelCase , __UpperCamelCase ):
            A = torch.cat(__UpperCamelCase , dim=0 )
        if isinstance(__UpperCamelCase , __UpperCamelCase ):
            A = torch.cat(__UpperCamelCase , dim=0 )
        if isinstance(__UpperCamelCase , __UpperCamelCase ):
            A = torch.cat(__UpperCamelCase , dim=0 )
        A = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            # Duplicate conditioning for the unconditional/conditional pass.
            A = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
            A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
            A = hint.repeat_interleave(__UpperCamelCase , dim=0 )
            A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
            A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
        self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase )
        A = self.scheduler.timesteps
        A = self.movq.config.latent_channels
        A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor )
        # create initial latent
        A = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , )
        for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
            # expand the latents if we are doing classifier free guidance
            A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            A = {"image_embeds": image_embeds, "hint": hint}
            A = self.unet(
                sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
            if do_classifier_free_guidance:
                # Split predicted noise/variance, apply guidance, re-join variance.
                A, A = noise_pred.split(latents.shape[1] , dim=1 )
                A, A = noise_pred.chunk(2 )
                A, A = variance_pred.chunk(2 )
                A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                A, A = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            A = self.scheduler.step(
                __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0]
        # post-processing
        A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] latents decode to [0, 1] and move to HWC numpy.
            A = image * 0.5 + 0.5
            A = image.clamp(0 , 1 )
            A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            if output_type == "pil":
                A = self.numpy_to_pil(__UpperCamelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__UpperCamelCase )
| 292 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
def __init__( self :Optional[int] , __UpperCamelCase :str , __UpperCamelCase :Dict=13 , __UpperCamelCase :str=7 , __UpperCamelCase :Tuple=True , __UpperCamelCase :Optional[Any]=True , __UpperCamelCase :Tuple=True , __UpperCamelCase :Optional[Any]=True , __UpperCamelCase :Any=99 , __UpperCamelCase :Any=16 , __UpperCamelCase :Tuple=36 , __UpperCamelCase :Tuple=6 , __UpperCamelCase :Optional[int]=6 , __UpperCamelCase :Union[str, Any]=6 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :List[Any]="gelu" , __UpperCamelCase :Dict=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Dict=5_12 , __UpperCamelCase :Dict=16 , __UpperCamelCase :Dict=2 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :List[Any]=3 , __UpperCamelCase :int=4 , __UpperCamelCase :int=None , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = embedding_size
A = hidden_size
A = num_hidden_layers
A = num_hidden_groups
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = scope
def lowerCamelCase ( self :List[Any] ):
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A = ids_tensor([self.batch_size] , self.num_choices )
A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self :List[str] ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Any , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Tuple ):
A = AlbertModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
A = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :List[Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Any , __UpperCamelCase :Tuple , __UpperCamelCase :Optional[int] , __UpperCamelCase :Union[str, Any] ):
A = AlbertForPreTraining(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , sentence_order_label=__UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :int , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[Any] ):
A = AlbertForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :List[str] , __UpperCamelCase :int , __UpperCamelCase :int , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Tuple ):
A = AlbertForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_for_sequence_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Build an AlbertForSequenceClassification model and shape-check its logits.

    NOTE(review): names restored — the mangled original had duplicate parameter
    names (SyntaxError) and read an undefined `result`.
    """
    config.num_labels = self.num_labels
    model = AlbertForSequenceClassification(config)
    model.to(torch_device)  # assumes `torch_device` is imported at file top — TODO confirm
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Build an AlbertForTokenClassification model and shape-check its per-token logits.

    NOTE(review): names restored — the mangled original had duplicate parameter
    names (SyntaxError) and read an undefined `result`.
    """
    config.num_labels = self.num_labels
    model = AlbertForTokenClassification(config=config)
    model.to(torch_device)  # assumes `torch_device` is imported at file top — TODO confirm
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Build an AlbertForMultipleChoice model and shape-check its per-choice logits.

    Inputs of shape (batch, seq) are expanded to (batch, num_choices, seq) as the
    multiple-choice head expects.

    NOTE(review): names restored — the mangled original had duplicate parameter
    names (SyntaxError) and referenced undefined `input_ids`/`token_type_ids`/
    `input_mask`/`result`.
    """
    config.num_choices = self.num_choices
    model = AlbertForMultipleChoice(config=config)
    model.to(torch_device)  # assumes `torch_device` is imported at file top — TODO confirm
    model.eval()
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids,
        attention_mask=multiple_choice_input_mask,
        token_type_ids=multiple_choice_token_type_ids,
        labels=choice_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """Return (config, inputs_dict) for the common ModelTesterMixin tests.

    NOTE(review): the mangled original unpacked every tuple element into the same
    name `A` and then read undefined `input_ids`/`token_type_ids`/`input_mask`/
    `config`; the unpacking is restored to the tuple order produced by
    `prepare_config_and_inputs`.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class AlbertModelTest(lowercase_, lowercase_, unittest.TestCase):
    """Model-level tests for ALBERT, driven by the tester class plus the common mixins.

    NOTE(review): the mangled original assigned every class attribute to the same
    name (`UpperCamelCase`) and every method to `lowerCamelCase`, so the mixin
    attributes were overwritten and unittest never collected the test methods;
    canonical names are restored. The class name is also restored so it no longer
    shadows / is shadowed by the other `_UpperCAmelCase` classes in this file.
    """

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add the extra pretraining label tensors the common mixin does not know about."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # NOTE(review): the original passed an undefined name to get_values();
            # confirm MODEL_FOR_PRETRAINING_MAPPING is imported at file top.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        # NOTE(review): config_class restored to AlbertConfig — confirm it is imported.
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        # Re-run the base model check for each supported position-embedding variant.
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    """Slow integration test pinning the output of the released albert-base-v2 checkpoint.

    NOTE(review): the mangled original reused one name for every tensor and read an
    undefined `__UpperCamelCase`; variables and the `test_*` method name are restored
    so unittest actually collects and runs it.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Golden values recorded from the released checkpoint; loose tolerance for
        # hardware/backend variation.
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 292 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """Builds small ViT-MSN configs and inputs and shape-checks model outputs.

    NOTE(review): the mangled original declared every __init__ parameter with the
    same name (a SyntaxError), never stored the values on `self`, and gave every
    method the same name; canonical names are restored. The class name is restored
    to match the `ViTMSNModelTester(self)` call in the model-test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Shape-check the base model's last hidden state."""
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Shape-check the classification head, including the greyscale (1-channel) case."""
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # f-prefix restored — the original printed the literal braces.
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for the common ModelTesterMixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(lowercase_, lowercase_, unittest.TestCase):
    """Common model tests for ViT-MSN; attention/hidden-state coverage comes from the mixins.

    NOTE(review): class attributes and method names restored from their clobbered
    obfuscated forms (`UpperCamelCase`/`lowerCamelCase`) so the mixin flags take
    effect and unittest collects the `test_*` methods.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def A__():
    """Load the COCO two-cats fixture image used by the integration tests below.

    NOTE(review): the original assigned the image to `A` but returned the
    undefined name `image` — a guaranteed NameError.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# The integration test below calls `prepare_img()`; keep that name bound to this loader.
prepare_img = A__
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """Slow integration test pinning the logits of facebook/vit-msn-small.

    NOTE(review): the mangled original read the undefined name `__UpperCamelCase`
    where the device belongs and reused `A` for every value; `torch_device` and
    descriptive names are restored. `default_image_processor` must keep that name —
    the test body reads it as a property.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits against golden values from the released checkpoint
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 292 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.