import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    """
    Utility class containing a conversation and its history.
    """

    def __init__(
        self,
        text: str = None,
        conversation_id: uuid.UUID = None,
        past_user_inputs: Optional[List[str]] = None,
        generated_responses: Optional[List[str]] = None,
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects Conversation objects as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {conversation.uuid} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
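
# Editorial usage sketch (not part of the original module; the checkpoint name
# is an assumption - any conversational checkpoint such as DialoGPT works):
#
#     from transformers import Conversation, pipeline
#
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("Going to the movies tonight - any suggestions?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])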
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because it predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
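
# Editorial note (assumption: the standard diffusers repository layout): the fast
# tests above run with plain pytest, while the @slow integration test only runs
# when RUN_SLOW=1 is set and a CUDA GPU is available, e.g.
#
#   RUN_SLOW=1 python -m pytest tests/pipelines/kandinsky/test_kandinsky_inpaint.py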
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
snake_case__ : Optional[Any] = 'unispeech'
def __init__( self :str , __magic_name__ :Dict=32 , __magic_name__ :Optional[Any]=768 , __magic_name__ :int=12 , __magic_name__ :Any=12 , __magic_name__ :Optional[Any]=3072 , __magic_name__ :List[str]="gelu" , __magic_name__ :Any=0.1 , __magic_name__ :Optional[int]=0.1 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[str]=0.0 , __magic_name__ :Optional[int]=0.1 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :Union[str, Any]=0.02 , __magic_name__ :List[Any]=1e-5 , __magic_name__ :Optional[int]="group" , __magic_name__ :int="gelu" , __magic_name__ :List[Any]=(512, 512, 512, 512, 512, 512, 512) , __magic_name__ :List[Any]=(5, 2, 2, 2, 2, 2, 2) , __magic_name__ :str=(10, 3, 3, 3, 3, 2, 2) , __magic_name__ :Optional[int]=False , __magic_name__ :Tuple=128 , __magic_name__ :Union[str, Any]=16 , __magic_name__ :Dict=False , __magic_name__ :Tuple=True , __magic_name__ :List[str]=0.05 , __magic_name__ :str=10 , __magic_name__ :str=2 , __magic_name__ :int=0.0 , __magic_name__ :str=10 , __magic_name__ :Optional[int]=0 , __magic_name__ :int=320 , __magic_name__ :List[Any]=2 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :List[Any]=100 , __magic_name__ :str=256 , __magic_name__ :Dict=256 , __magic_name__ :Optional[Any]=0.1 , __magic_name__ :List[Any]="mean" , __magic_name__ :str=False , __magic_name__ :Dict=False , __magic_name__ :Dict=256 , __magic_name__ :List[str]=80 , __magic_name__ :Any=0 , __magic_name__ :Union[str, Any]=1 , __magic_name__ :Any=2 , __magic_name__ :List[str]=0.5 , **__magic_name__ :Any , ) -> Any:
'''simple docstring'''
super().__init__(**__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ )
a__ = hidden_size
a__ = feat_extract_norm
a__ = feat_extract_activation
a__ = list(__magic_name__ )
a__ = list(__magic_name__ )
a__ = list(__magic_name__ )
a__ = conv_bias
a__ = num_conv_pos_embeddings
a__ = num_conv_pos_embedding_groups
a__ = len(self.conv_dim )
a__ = num_hidden_layers
a__ = intermediate_size
a__ = hidden_act
a__ = num_attention_heads
a__ = hidden_dropout
a__ = attention_dropout
a__ = activation_dropout
a__ = feat_proj_dropout
a__ = final_dropout
a__ = layerdrop
a__ = layer_norm_eps
a__ = initializer_range
a__ = num_ctc_classes
a__ = vocab_size
a__ = do_stable_layer_norm
a__ = use_weighted_layer_sum
a__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a__ = apply_spec_augment
a__ = mask_time_prob
a__ = mask_time_length
a__ = mask_time_min_masks
a__ = mask_feature_prob
a__ = mask_feature_length
a__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
a__ = num_codevectors_per_group
a__ = num_codevector_groups
a__ = contrastive_logits_temperature
a__ = feat_quantizer_dropout
a__ = num_negatives
a__ = codevector_dim
a__ = proj_codevector_dim
a__ = diversity_loss_weight
# ctc loss
a__ = ctc_loss_reduction
a__ = ctc_zero_infinity
# pretraining loss
a__ = replace_prob
@property
def _UpperCamelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
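
# Editorial usage sketch (not part of the original module): instantiating the
# config and reading the derived downsampling ratio defined just above.
#
#     from transformers import UniSpeechConfig
#
#     config = UniSpeechConfig()  # conv_stride defaults to (5, 2, 2, 2, 2, 2, 2)
#     print(config.inputs_to_logits_ratio)  # 5 * 2**6 == 320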
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_invalid_num_proc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
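
# Editorial round-trip sketch (not part of the original test file): the same
# writer/reader pair exercised above, outside of pytest.
#
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#     with io.BytesIO() as buffer:
#         JsonDatasetWriter(ds, buffer, lines=True).write()
#         buffer.seek(0)
#         print([json.loads(line) for line in buffer])  # two JSON records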
from collections import deque
def tarjan(g):
    """
    Tarjan's algorithm for finding strongly connected components in a directed
    graph, given as a list of adjacency lists. Returns a list of components.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
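
    # Editorial addition: a second, easily verified check - a 3-cycle collapses
    # into a single strongly connected component.
    cycle = create_graph(3, [(0, 1), (1, 2), (2, 0)])
    assert [[2, 1, 0]] == tarjan(cycle)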
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: List[int], lower: List[int], upper: List[int]):
    return (
        clamp(rect[0], lower[0], upper[0]),
        clamp(rect[1], lower[1], upper[1]),
        clamp(rect[2], lower[0], upper[0]),
        clamp(rect[3], lower[1], upper[1]),
    )


def add_overlap_rect(rect: List[int], overlap: int, image_size: List[int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    # floor n to the nearest multiple of d
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
"""simple docstring"""
def a_ ( _lowerCAmelCase : list ):
'''simple docstring'''
if len(_lowerCAmelCase ) <= 1:
return lst
lowercase__ : Dict = 1
while i < len(_lowerCAmelCase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
lowercase__ , lowercase__ : Optional[int] = lst[i], lst[i - 1]
i -= 1
if i == 0:
lowercase__ : int = 1
return lst
if __name__ == "__main__":
_UpperCamelCase : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
_UpperCamelCase : List[str] = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
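
    # Editorial addition: deterministic self-checks (run after the interactive
    # demo above; gnome_sort sorts in place and also returns the list).
    assert gnome_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
    assert gnome_sort([-2, -5, -45]) == [-45, -5, -2]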
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
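
# Editorial note: example invocation (script name and all paths are placeholders):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf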
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = AudioLDMPipeline
__UpperCAmelCase : Optional[int] = TEXT_TO_AUDIO_PARAMS
__UpperCAmelCase : List[str] = TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCAmelCase : List[str] = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __lowercase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=UpperCamelCase_ ,)
_a : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,clip_sample=UpperCamelCase_ ,set_alpha_to_one=UpperCamelCase_ ,)
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
_a : Optional[Any] = ClapTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,)
_a : Any = ClapTextModelWithProjection(UpperCamelCase_ )
_a : List[Any] = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 )
_a : Any = SpeechTaHifiGanConfig(
model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=UpperCamelCase_ ,)
_a : Dict = SpeechTaHifiGan(UpperCamelCase_ )
_a : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def __lowercase ( self : str ,_a : List[str] ,_a : Union[str, Any]=0 ):
'''simple docstring'''
if str(UpperCamelCase_ ).startswith('mps' ):
_a : Dict = torch.manual_seed(UpperCamelCase_ )
else:
_a : Union[str, Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
_a : List[str] = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : Optional[Any] = self.get_dummy_components()
_a : Optional[int] = AudioLDMPipeline(**UpperCamelCase_ )
_a : List[str] = audioldm_pipe.to(UpperCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_a : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ )
_a : int = audioldm_pipe(**UpperCamelCase_ )
_a : int = output.audios[0]
assert audio.ndim == 1
assert len(UpperCamelCase_ ) == 256
_a : List[str] = audio[:10]
_a : List[str] = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __lowercase ( self : int ):
'''simple docstring'''
_a : str = self.get_dummy_components()
_a : List[str] = AudioLDMPipeline(**UpperCamelCase_ )
_a : Any = audioldm_pipe.to(UpperCamelCase_ )
_a : List[Any] = audioldm_pipe.to(UpperCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_a : Any = self.get_dummy_inputs(UpperCamelCase_ )
_a : List[str] = 3 * [inputs['prompt']]
# forward
_a : str = audioldm_pipe(**UpperCamelCase_ )
_a : str = output.audios[0]
_a : List[Any] = self.get_dummy_inputs(UpperCamelCase_ )
_a : List[str] = 3 * [inputs.pop('prompt' )]
_a : Union[str, Any] = audioldm_pipe.tokenizer(
UpperCamelCase_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=UpperCamelCase_ ,return_tensors='pt' ,)
_a : Optional[int] = text_inputs['input_ids'].to(UpperCamelCase_ )
_a : List[str] = audioldm_pipe.text_encoder(
UpperCamelCase_ ,)
_a : List[str] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_a : Dict = F.normalize(UpperCamelCase_ ,dim=-1 )
_a : str = prompt_embeds
# forward
_a : Dict = audioldm_pipe(**UpperCamelCase_ )
_a : str = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Dict = self.get_dummy_components()
_a : int = AudioLDMPipeline(**UpperCamelCase_ )
_a : Optional[int] = audioldm_pipe.to(UpperCamelCase_ )
_a : str = audioldm_pipe.to(UpperCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_a : Tuple = self.get_dummy_inputs(UpperCamelCase_ )
_a : Optional[Any] = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
# forward
_a : Optional[int] = audioldm_pipe(**UpperCamelCase_ )
        audio_a = output.audios[0]
_a : str = self.get_dummy_inputs(UpperCamelCase_ )
        prompt = 3 * [inputs.pop('prompt' )]
_a : str = []
for p in [prompt, negative_prompt]:
_a : Optional[int] = audioldm_pipe.tokenizer(
UpperCamelCase_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=UpperCamelCase_ ,return_tensors='pt' ,)
_a : Dict = text_inputs['input_ids'].to(UpperCamelCase_ )
_a : Dict = audioldm_pipe.text_encoder(
UpperCamelCase_ ,)
_a : Optional[int] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_a : Any = F.normalize(UpperCamelCase_ ,dim=-1 )
embeds.append(UpperCamelCase_ )
        inputs['prompt_embeds'] , inputs['negative_prompt_embeds'] = embeds
# forward
_a : List[Any] = audioldm_pipe(**UpperCamelCase_ )
        audio_b = output.audios[0]
        assert np.abs(audio_a - audio_b ).max() < 1E-2
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : str = self.get_dummy_components()
_a : int = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
_a : Optional[Any] = AudioLDMPipeline(**UpperCamelCase_ )
_a : Union[str, Any] = audioldm_pipe.to(UpperCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_a : str = self.get_dummy_inputs(UpperCamelCase_ )
_a : str = 'egg cracking'
_a : List[str] = audioldm_pipe(**UpperCamelCase_ ,negative_prompt=UpperCamelCase_ )
_a : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(UpperCamelCase_ ) == 256
_a : List[Any] = audio[:10]
_a : List[Any] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __lowercase ( self : str ):
'''simple docstring'''
_a : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : Tuple = self.get_dummy_components()
_a : Tuple = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
_a : Any = AudioLDMPipeline(**UpperCamelCase_ )
_a : Tuple = audioldm_pipe.to(UpperCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_a : Dict = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
_a : Optional[Any] = audioldm_pipe(UpperCamelCase_ ,num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_a : Dict = 2
_a : Optional[int] = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_a : Tuple = 2
_a : Dict = audioldm_pipe(UpperCamelCase_ ,num_inference_steps=2 ,num_waveforms_per_prompt=UpperCamelCase_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_a : List[Any] = 2
_a : Dict = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=UpperCamelCase_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : Union[str, Any] = self.get_dummy_components()
_a : Any = AudioLDMPipeline(**UpperCamelCase_ )
_a : Union[str, Any] = audioldm_pipe.to(UpperCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_a : int = audioldm_pipe.vocoder.config.sampling_rate
_a : List[Any] = self.get_dummy_inputs(UpperCamelCase_ )
_a : Optional[Any] = audioldm_pipe(audio_length_in_s=0.016 ,**UpperCamelCase_ )
_a : Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(UpperCamelCase_ ) / vocoder_sampling_rate == 0.016
_a : int = audioldm_pipe(audio_length_in_s=0.032 ,**UpperCamelCase_ )
_a : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(UpperCamelCase_ ) / vocoder_sampling_rate == 0.032
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Optional[Any] = self.get_dummy_components()
_a : Optional[Any] = AudioLDMPipeline(**UpperCamelCase_ )
_a : Optional[int] = audioldm_pipe.to(UpperCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_a : Optional[Any] = ['hey']
_a : Union[str, Any] = audioldm_pipe(UpperCamelCase_ ,num_inference_steps=1 )
_a : Union[str, Any] = output.audios.shape
assert audio_shape == (1, 256)
_a : str = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_a : str = SpeechTaHifiGan(UpperCamelCase_ ).to(UpperCamelCase_ )
_a : List[Any] = audioldm_pipe(UpperCamelCase_ ,num_inference_steps=1 )
_a : Optional[Any] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
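        # (model_in_dim is the number of mel channels the vocoder consumes; the
        # upsample_rates that determine the output waveform length are untouched)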
assert audio_shape == (1, 256)
def __lowercase ( self : str ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCamelCase_ )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=UpperCamelCase_ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCamelCase_ )
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : str ,_a : int ,_a : Any="cpu" ,_a : str=torch.floataa ,_a : List[Any]=0 ):
'''simple docstring'''
_a : int = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
_a : Optional[int] = np.random.RandomState(UpperCamelCase_ ).standard_normal((1, 8, 128, 16) )
_a : Union[str, Any] = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ ,dtype=UpperCamelCase_ )
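        # the latents are drawn with numpy and only then moved to the requested
        # device/dtype, so the same seed gives identical initial noise on every backend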
_a : List[Any] = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[str] = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
_a : int = audioldm_pipe.to(UpperCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_a : Tuple = self.get_inputs(UpperCamelCase_ )
_a : str = 25
_a : int = audioldm_pipe(**UpperCamelCase_ ).audios[0]
assert audio.ndim == 1
assert len(UpperCamelCase_ ) == 8_1920
_a : Optional[Any] = audio[7_7230:7_7240]
_a : Union[str, Any] = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_a : Tuple = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Any = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
_a : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_a : List[Any] = audioldm_pipe.to(UpperCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_a : Any = self.get_inputs(UpperCamelCase_ )
_a : Dict = audioldm_pipe(**UpperCamelCase_ ).audios[0]
assert audio.ndim == 1
assert len(UpperCamelCase_ ) == 8_1920
_a : Optional[int] = audio[2_7780:2_7790]
_a : Optional[Any] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_a : Tuple = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 229
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class a ( lowercase ):
UpperCamelCase : Union[str, Any] = """bert-generation"""
def __init__( self , UpperCamelCase_=50_358 , UpperCamelCase_=1_024 , UpperCamelCase_=24 , UpperCamelCase_=16 , UpperCamelCase_=4_096 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=0.02 , UpperCamelCase_=1E-12 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_="absolute" , UpperCamelCase_=True , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : Optional[Any] = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : List[Any] = max_position_embeddings
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : Optional[int] = layer_norm_eps
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : Dict = use_cache
| 110
| 0
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
    bleu_data = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
def _snake_case ( self , __A ) -> int:
return FSMTTokenizer.from_pretrained(__A )
def _snake_case ( self , __A ) -> Tuple:
SCREAMING_SNAKE_CASE_ : int =FSMTForConditionalGeneration.from_pretrained(__A ).to(__A )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['''en-ru''', 26.0],
['''ru-en''', 22.0],
['''en-de''', 22.0],
['''de-en''', 29.0],
] )
@slow
def _snake_case ( self , __A , __A ) -> List[Any]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
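        # the BLEU thresholds in the parameterized cases above are deliberately loose
        # lower bounds, not the models' best reported scores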
SCREAMING_SNAKE_CASE_ : int =F'facebook/wmt19-{pair}'
SCREAMING_SNAKE_CASE_ : Optional[int] =self.get_tokenizer(__A )
SCREAMING_SNAKE_CASE_ : Dict =self.get_model(__A )
SCREAMING_SNAKE_CASE_ : List[str] =bleu_data[pair]['''src''']
SCREAMING_SNAKE_CASE_ : Dict =bleu_data[pair]['''tgt''']
SCREAMING_SNAKE_CASE_ : str =tokenizer(__A , return_tensors='''pt''' , truncation=__A , padding='''longest''' ).to(__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =model.generate(
input_ids=batch.input_ids , num_beams=8 , )
SCREAMING_SNAKE_CASE_ : int =tokenizer.batch_decode(
__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =calculate_bleu(__A , __A )
print(__A )
self.assertGreaterEqual(scores['''bleu'''] , __A )
| 431
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset ( ) -> Dataset:
    data_dict = {
        '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
        '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
        '''content''': ['''a ''' * 2_0, '''a ''' * 3_0, '''b ''' * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class lowercase_ ( A ):
def _snake_case ( self ) -> Dict:
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _snake_case ( self ) -> Union[str, Any]:
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , True )
| 431
| 1
|
"""simple docstring"""
def partition ( m ):
    '''Count the integer partitions of m, i.e. the number of ways to write m as a sum of positive integers.'''
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
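# hand-checked values: partition(3) == 3, partition(4) == 5, partition(5) == 7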
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 636
|
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__()
__lowerCAmelCase = nn.ModuleList(__a )
def snake_case ( self , __a , __a , __a , __a , __a , __a = None , __a = None , __a = None , __a = None , __a = False , __a = True , ):
for i, (image, scale, controlnet) in enumerate(zip(__a , __a , self.nets ) ):
__lowerCAmelCase , __lowerCAmelCase = controlnet(
__a , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , )
# merge samples
if i == 0:
__lowerCAmelCase , __lowerCAmelCase = down_samples, mid_sample
else:
__lowerCAmelCase = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__a , __a )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def snake_case ( self , __a , __a = True , __a = None , __a = False , __a = None , ):
__lowerCAmelCase = 0
__lowerCAmelCase = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__a , is_main_process=__a , save_function=__a , safe_serialization=__a , variant=__a , )
idx += 1
__lowerCAmelCase = model_path_to_save + f"_{idx}"
@classmethod
def snake_case ( cls , __a , **__a ):
__lowerCAmelCase = 0
__lowerCAmelCase = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
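        # e.g. with three controlnets the loader visits:
        #   ./mydirectory/controlnet
        #   ./mydirectory/controlnet_1
        #   ./mydirectory/controlnet_2
        # and stops at the first suffix for which no directory exists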
__lowerCAmelCase = pretrained_model_path
while os.path.isdir(__a ):
__lowerCAmelCase = ControlNetModel.from_pretrained(__a , **__a )
controlnets.append(__a )
idx += 1
__lowerCAmelCase = pretrained_model_path + f"_{idx}"
logger.info(f"{len(__a )} controlnets loaded from {pretrained_model_path}." )
if len(__a ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(__a )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(__a )
| 636
| 1
|
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Any = WavaVecaPhonemeCTCTokenizer
UpperCamelCase : Union[str, Any] = False
def UpperCAmelCase_ ( self ):
super().setUp()
__A : List[Any] = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
__A : Tuple = dict(zip(_A , range(len(_A ) ) ) )
__A : Any = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
__A : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
def UpperCAmelCase_ ( self , _A , _A=False , _A=20 , _A=5 ):
__A : Dict = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_A )) for i in range(len(_A ) )]
__A : Tuple = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
__A : List[Any] = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
__A : List[Any] = toks + toks
# toks_str = [t[1] for t in toks]
__A : str = [t[0] for t in toks]
# Ensure consistency
__A : List[str] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
__A : Union[str, Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
__A : Tuple = ' ' + output_txt
__A : Optional[Any] = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def UpperCAmelCase_ ( self , **_A ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
__A : Any = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
__A : List[Any] = tokenizer('m xxx ɪ' , do_phonemize=_A ).input_ids
self.assertEqual(_A , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
__A : Tuple = tokenizer('m aaa ɪ ccc' , do_phonemize=_A ).input_ids
self.assertEqual(_A , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__A : int = tokenizer('maɪ c' , do_phonemize=_A ).input_ids
self.assertEqual(_A , [3, 200] ) # mai should be <unk> (=3)
def UpperCAmelCase_ ( self ):
__A : str = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__A : int = 'Hello how are you'
__A : Optional[int] = tokenizer.phonemize(_A , phonemizer_lang='en-us' )
self.assertEqual(_A , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def UpperCAmelCase_ ( self ):
__A : str = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__A : Tuple = 'Hello how are you'
__A : Optional[int] = tokenizer.phonemize(_A , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(_A ).input_ids , tokenizer(_A , do_phonemize=_A ).input_ids )
def UpperCAmelCase_ ( self ):
__A : Dict = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__A : Optional[int] = 'Hello how are you'
__A : Dict = tokenizer.phonemize(_A , phonemizer_lang='en-us' )
__A : Any = tokenizer.decode(tokenizer(_A ).input_ids )
self.assertEqual(_A , _A )
def UpperCAmelCase_ ( self ):
__A : Dict = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__A : str = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__A : List[str] = tokenizer.decode(sample_ids[0] )
__A : Any = tokenizer.batch_decode(_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__A : str = 'Hello how are you'
__A : Any = tokenizer.phonemize(_A , phonemizer_lang='en-us' )
self.assertEqual(_A , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def UpperCAmelCase_ ( self ):
__A : str = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__A : Tuple = 'Hello how are you'
__A : Dict = tokenizer.phonemize(_A , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(_A ).input_ids , tokenizer(_A , do_phonemize=_A ).input_ids )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
__A : str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__A : Dict = tokenizer.decode(sample_ids[0] )
__A : Any = tokenizer.batch_decode(_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
__A : List[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_A )
__A : List[str] = tokenizer.batch_decode(_A , filter_word_delimiter_token=_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__A : Dict = 'Hello how are you'
__A : List[str] = tokenizer.phonemize(_A , phonemizer_lang='en-us' )
__A : Dict = tokenizer.decode(tokenizer(_A ).input_ids , filter_word_delimiter_token=_A )
self.assertEqual(_A , _A )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
__A : Any = 'Hello how are you'
__A : Dict = tokenizer.phonemize(_A , phonemizer_lang='en-us' )
__A : Optional[Any] = tokenizer.decode(tokenizer(_A ).input_ids , filter_word_delimiter_token=_A )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , _A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=_A )
__A : Optional[Any] = 'Hello how are you'
__A : List[str] = tokenizer(_A , phonemizer_lang='en-us' ).input_ids
__A : str = tokenizer(_A , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(_A , _A )
__A : List[Any] = tokenizer.decode(_A )
__A : Optional[int] = tokenizer.decode(_A )
self.assertEqual(_A , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(_A , 'ɛ l o h aʊ a ʁ j u' )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
__A : Union[str, Any] = 'Hello how Are you'
__A : Union[str, Any] = 'hello how are you'
__A : List[str] = tokenizer(_A ).input_ids
__A : Optional[int] = tokenizer(_A ).input_ids
self.assertEqual(_A , _A )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
__A : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__A : Any = tokenizer.batch_decode(_A )
self.assertEqual(_A , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
def UpperCAmelCase_ ( _A , _A ):
__A : str = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
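        # CTC-style decoding first merges repeated ids, then drops <pad>; each surviving
        # char keeps the start/end frame of the run it was collapsed from (verified below)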
__A : Tuple = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__A : int = tokenizer.decode(_A , output_char_offsets=_A , filter_word_delimiter_token=_A )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(_A , _A ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_tokenizer(word_delimiter_token='|' )
def check_list_tuples_equal(_A , _A ):
self.assertTrue(isinstance(_A , _A ) )
self.assertTrue(isinstance(outputs_list[0] , _A ) )
# transform list to ModelOutput
__A : Union[str, Any] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
def recursive_check(_A , _A ):
if isinstance(_A , _A ):
[recursive_check(_A , _A ) for la, la in zip(_A , _A )]
self.assertEqual(_A , _A )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
__A : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__A : List[str] = tokenizer.batch_decode(_A , output_char_offsets=_A )
__A : Tuple = [tokenizer.decode(_A , output_char_offsets=_A ) for ids in sample_ids]
check_list_tuples_equal(_A , _A )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__A : Optional[Any] = tokenizer.vocab_size
__A : List[str] = len(_A )
self.assertNotEqual(_A , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__A : int = ['aaaaa bbbbbb', 'cccccccccdddddddd']
__A : Tuple = tokenizer.add_tokens(_A )
__A : Optional[int] = tokenizer.vocab_size
__A : Union[str, Any] = len(_A )
self.assertNotEqual(_A , 0 )
self.assertEqual(_A , _A )
self.assertEqual(_A , len(_A ) )
self.assertEqual(_A , all_size + len(_A ) )
__A : str = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_A )
self.assertGreaterEqual(len(_A ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__A : List[str] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
__A : Union[str, Any] = tokenizer.add_special_tokens(_A )
__A : Optional[Any] = tokenizer.vocab_size
__A : Optional[Any] = len(_A )
self.assertNotEqual(_A , 0 )
self.assertEqual(_A , _A )
self.assertEqual(_A , len(_A ) )
self.assertEqual(_A , all_size_a + len(_A ) )
__A : Any = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_A )
self.assertGreaterEqual(len(_A ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
__A : Optional[Any] = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__A : Dict = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
__A : Union[str, Any] = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(output['text'] , _A )
| 77
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Any = ShapEPipeline
UpperCamelCase : str = ['''prompt''']
UpperCamelCase : Tuple = ['''prompt''']
UpperCamelCase : Optional[int] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase : int = False
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
return 8
@property
def UpperCAmelCase_ ( self ):
__A : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : int = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__A : Optional[Any] = PriorTransformer(**_A )
return model
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : List[str] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__A : List[Any] = ShapERenderer(**_A )
return model
def UpperCAmelCase_ ( self ):
__A : List[str] = self.dummy_prior
__A : Optional[int] = self.dummy_text_encoder
__A : List[Any] = self.dummy_tokenizer
__A : str = self.dummy_renderer
__A : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
__A : Any = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def UpperCAmelCase_ ( self , _A , _A=0 ):
if str(_A ).startswith('mps' ):
__A : List[Any] = torch.manual_seed(_A )
else:
__A : Dict = torch.Generator(device=_A ).manual_seed(_A )
__A : int = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def UpperCAmelCase_ ( self ):
__A : Tuple = 'cpu'
__A : Any = self.get_dummy_components()
__A : Tuple = self.pipeline_class(**_A )
__A : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Tuple = pipe(**self.get_dummy_inputs(_A ) )
__A : int = output.images[0]
__A : str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
        __A : Any = np.array(
            [0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase_ ( self ):
__A : List[str] = torch_device == 'cpu'
__A : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCAmelCase_ ( self ):
__A : Any = self.get_dummy_components()
__A : Any = self.pipeline_class(**_A )
__A : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Any = 1
__A : Dict = 2
__A : Tuple = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
__A : Optional[int] = batch_size * [inputs[key]]
__A : Optional[int] = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
__A : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__A : Dict = ShapEPipeline.from_pretrained('openai/shap-e' )
__A : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : str = torch.Generator(device=_A ).manual_seed(0 )
__A : Tuple = pipe(
'a shark' , generator=_A , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 77
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCamelCase_ ( __snake_case ):
'''simple docstring'''
UpperCAmelCase__ = '''vivit'''
def __init__( self : str , UpperCAmelCase__ : str=224 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Optional[int]=[2, 16, 16] , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : Dict=768 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Optional[int]=3_072 , UpperCAmelCase__ : List[str]="gelu_fast" , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Any=1e-06 , UpperCAmelCase__ : Any=True , **UpperCAmelCase__ : str , ) ->Union[str, Any]:
'''simple docstring'''
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = layer_norm_eps
A__ = image_size
A__ = num_frames
A__ = tubelet_size
A__ = num_channels
A__ = qkv_bias
super().__init__(**UpperCAmelCase__)
| 87
|
from __future__ import annotations
def binary_search ( a_list : list[int] , item : int ) -> bool:
    """Recursively search the sorted list a_list for item; returns True iff it is present."""
    if len(a_list ) == 0:
        return False
    midpoint = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint] , item )
    else:
        return binary_search(a_list[midpoint + 1 :] , item )
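# hand-checked examples: binary_search([1, 3, 5, 7], 5) -> True and
# binary_search([1, 3, 5, 7], 4) -> False (the input must already be sorted ascending)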
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(F'''{target} was {not_str}found in {sequence}''')
| 386
| 0
|
'''simple docstring'''
from manim import *
class lowerCAmelCase__ ( _lowerCAmelCase ):
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : List[str] = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCamelCase_ : str = [mem.copy() for i in range(6 )]
lowerCamelCase_ : str = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
lowerCamelCase_ : Union[str, Any] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
lowerCamelCase_ : Optional[Any] = VGroup(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
lowerCamelCase_ : Optional[int] = Text('''CPU''' , font_size=24 )
lowerCamelCase_ : List[Any] = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = [mem.copy() for i in range(4 )]
lowerCamelCase_ : Tuple = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
lowerCamelCase_ : Tuple = Text('''GPU''' , font_size=24 )
lowerCamelCase_ : Any = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCamelCase_ )
lowerCamelCase_ : int = [mem.copy() for i in range(6 )]
lowerCamelCase_ : Union[str, Any] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
lowerCamelCase_ : Optional[int] = Text('''Model''' , font_size=24 )
lowerCamelCase_ : int = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCamelCase_ )
lowerCamelCase_ : Dict = []
for i, rect in enumerate(UpperCamelCase_ ):
rect.set_stroke(UpperCamelCase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowerCamelCase_ : Tuple = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=UpperCamelCase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=UpperCamelCase_ , buff=0.0 )
self.add(UpperCamelCase_ )
cpu_targs.append(UpperCamelCase_ )
lowerCamelCase_ : Tuple = [mem.copy() for i in range(6 )]
lowerCamelCase_ : int = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
lowerCamelCase_ : Tuple = Text('''Loaded Checkpoint''' , font_size=24 )
lowerCamelCase_ : List[Any] = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , aligned_edge=UpperCamelCase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowerCamelCase_ : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ : int = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : List[Any] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(UpperCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
lowerCamelCase_ : Optional[int] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase_ ) , Write(UpperCamelCase_ ) )
self.play(Write(UpperCamelCase_ , run_time=1 ) , Create(UpperCamelCase_ , run_time=1 ) )
lowerCamelCase_ : int = []
lowerCamelCase_ : Optional[Any] = []
for i, rect in enumerate(UpperCamelCase_ ):
lowerCamelCase_ : List[str] = fill.copy().set_fill(UpperCamelCase_ , opacity=0.7 )
target.move_to(UpperCamelCase_ )
first_animations.append(GrowFromCenter(UpperCamelCase_ , run_time=1 ) )
lowerCamelCase_ : Tuple = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(UpperCamelCase_ , run_time=1.5 ) )
self.play(*UpperCamelCase_ )
self.play(*UpperCamelCase_ )
self.wait()
| 418
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( _lowerCAmelCase ,_lowerCAmelCase ,unittest.TestCase ):
A = StableDiffusionSAGPipeline
A = TEXT_TO_IMAGE_PARAMS
A = TEXT_TO_IMAGE_BATCH_PARAMS
A = TEXT_TO_IMAGE_IMAGE_PARAMS
A = TEXT_TO_IMAGE_IMAGE_PARAMS
A = False
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCamelCase_ : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
lowerCamelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowerCamelCase_ : Dict = CLIPTextModel(UpperCamelCase_ )
lowerCamelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any]=0 ) -> Tuple:
"""simple docstring"""
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowerCamelCase_ : Any = torch.manual_seed(UpperCamelCase_ )
else:
lowerCamelCase_ : str = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowerCamelCase_ : Tuple = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : str = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
lowerCamelCase_ : int = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCamelCase_ : str = '''.'''
lowerCamelCase_ : int = torch.manual_seed(0 )
lowerCamelCase_ : int = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowerCamelCase_ : Dict = output.images
lowerCamelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : Tuple = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : List[Any] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowerCamelCase_ : Optional[Any] = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCamelCase_ : Tuple = '''.'''
lowerCamelCase_ : Tuple = torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowerCamelCase_ : Optional[int] = output.images
lowerCamelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : Dict = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowerCamelCase_ : Optional[Any] = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCamelCase_ : List[str] = '''.'''
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : int = sag_pipe(
[prompt] , width=768 , height=512 , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
lowerCamelCase_ : Union[str, Any] = output.images
assert image.shape == (1, 512, 768, 3)
| 418
| 1
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __a ( unittest.TestCase ):
_a : int = JukeboxTokenizer
_a : Optional[int] = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 618
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    # NOTE: the obfuscated source dropped the boolean flag values (training/inference/
    # eager_mode/...); the values below are plausible reconstructions, not verbatim.
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
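# Usage sketch (standalone, outside pytest; argument values are illustrative, matching
# the tests above rather than a documented recipe):
#
#     args = TensorFlowBenchmarkArguments(models=["sshleifer/tiny-gpt2"], inference=True,
#                                         sequence_lengths=[8], batch_sizes=[1], multi_process=False)
#     print(TensorFlowBenchmark(args).run())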
| 618
| 1
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : list, UpperCAmelCase__ : int, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0 ) ->int:
A__ : Dict = right or len(UpperCAmelCase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCAmelCase__, UpperCAmelCase__, left + 1, right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 498
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with Karras et al. (2022) stochastic sampling."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"]
                )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
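# Minimal usage sketch (assumption: a KarrasVe-compatible checkpoint such as
# "google/ncsnpp-celebahq-256" is available locally or on the Hub):
#
#     pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#     image = pipe(num_inference_steps=50).images[0]
#     image.save("sample.png")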
| 498
| 1
|
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
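# Sketch of how a concrete test class would consume this mixin (the names below are
# illustrative, not taken from the original file):
#
#     class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#         feature_extraction_class = MyFeatureExtractor
#         feat_extract_dict = {"feature_size": 80, "sampling_rate": 16000}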
| 28
|
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
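# Each Miller-Rabin round lets a composite slip through with probability at most 1/4,
# so the 5 independent rounds above bound the error at 4**-5 (< 0.1%).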
def is_prime_low_num(num: int) -> bool:
    """Trial division against small primes, then fall back to Miller-Rabin."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime number of `keysize` bits in size."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 225
| 0
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenize "lower" into BPE pieces and check the resulting token ids."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 181
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__snake_case = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1_024,
"facebook/mbart-large-cc25": 1_024,
}
# fmt: off
__snake_case = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, [eos, src_lang_code] suffix."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, [eos, tgt_lang_code] suffix."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
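# Usage sketch (mirrors the standard MBart translation flow; the input sentence is
# illustrative):
#
#     tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
#     batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # the target language is selected at generation time:
#     #     model.generate(**batch, forced_bos_token_id=tok.lang_code_to_id["ro_RO"])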
| 181
| 1
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
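# Example invocation (paths and model identifiers below are illustrative):
#
#     python consolidate_rag_checkpoint.py \
#         --model_type rag_sequence \
#         --generator_name_or_path facebook/bart-large-cnn \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#         --dest ./rag-consolidated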
| 55
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric to output_dir."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
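# Sketch of wiring these callbacks into a Lightning trainer (argument values are
# illustrative):
#
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(output_dir="out", metric="rouge2"),
#             get_early_stopping_callback(metric="rouge2", patience=3),
#         ]
#     )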
| 9
| 0
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer, backed by a byte-level BPE model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets,
            ),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
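# Usage sketch (checkpoint name taken from the pretrained maps above; the input
# sentence is illustrative):
#
#     tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tok("my friends are cool but they eat too many carbs.").input_ids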
| 711
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '[MASK]')
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ' '.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='pt', return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence], return_tensors='pt', return_token_type_ids=False
        )
        config = BigBirdConfig(attention_type='original_full')
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        """Ensure that space is not stripped around the [MASK] special token."""
        tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
        decoded_text = tokenizer.decode(tokenizer('Paris is the [MASK].').input_ids)
        self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]')

    @slow
    def test_tokenizer_integration(self):
# fmt: off
__lowerCamelCase : Optional[Any] = {'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCamelCase, model_name='google/bigbird-roberta-base', revision='215c99f1600e06f83acce68422f2035b2b5c3510',
        )
| 230
| 0
|
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the intensity of polarized light after an analyzer at `angle` degrees.

    Malus's law: I = I_0 * cos^2(theta).
    """
    if initial_intensity < 0:
        # handling of negative values of initial intensity
        raise ValueError('The value of intensity cannot be negative')
    if angle < 0 or angle > 360:
        # handling of values out of allowed range
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
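# Worked example: at theta = 60 degrees, cos^2(60°) = 0.25, so the analyzer passes a
# quarter of the incident intensity, e.g. malus_law(100.0, 60.0) ≈ 25.0.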
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 343
|
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate intersection and union areas between a prediction and a ground-truth map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate intersection and union areas over a list of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou)
    metrics['mean_accuracy'] = np.nanmean(acc)
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                    'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                }
            ),
            reference_urls=[
                'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
            ],
        )

    def _compute(self, predictions, references, num_labels: int, ignore_index: int, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 343
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 602
|
'''simple docstring'''
from __future__ import annotations
class Node:
    """A node of a singly linked list."""

    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from the given elements and return its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the linked list in reverse order via recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 602
| 1
|
import unittest
import numpy as np
def schur_complement(mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, pseudo_inv: np.ndarray = None) -> np.ndarray:
    """Schur complement of the block matrix [[A, B], [B.T, C]] with respect to A:
    S = C - B.T @ A^-1 @ B, satisfying det([[A, B], [B.T, C]]) = det(A) * det(S).
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
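# The determinant identity checked above, det([[A, B], [B.T, C]]) = det(A) * det(S),
# is the standard block-matrix factorization result; for a symmetric positive definite
# block matrix with invertible A, the Schur complement S is itself positive definite.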
| 249
|
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
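

# A small hand-checked example (added for illustration): the function returns
# the longest non-decreasing subsequence of the input.
#
# >>> longest_subsequence([4, 1, 2, 3])
# [1, 2, 3]
# >>> longest_subsequence([5])
# [5]

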
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 249
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE_ = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['DPTFeatureExtractor']
SCREAMING_SNAKE_CASE_ = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
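
# Hedged usage note (not part of the module itself): with this lazy pattern,
# importing the package stays cheap; a heavy submodule is only loaded when one
# of its attributes is first accessed, e.g.
#
#   from transformers.models.dpt import DPTConfig  # loads configuration_dpt only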
| 709
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class a :
"""simple docstring"""
A__ : int = MBartConfig
A__ : Union[str, Any] = {}
A__ : Optional[int] = "gelu"
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=False , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=20 , snake_case_=2 , snake_case_=1 , snake_case_=0 , ) -> Dict:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def __A ( self ) -> Optional[Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_mbart_inputs_dict(snake_case_ , snake_case_ , snake_case_ )
return config, inputs_dict
def __A ( self , snake_case_ , snake_case_ ) -> List[str]:
_UpperCAmelCase = TFMBartModel(config=snake_case_ ).get_decoder()
_UpperCAmelCase = inputs_dict["input_ids"]
_UpperCAmelCase = input_ids[:1, :]
_UpperCAmelCase = inputs_dict["attention_mask"][:1, :]
_UpperCAmelCase = inputs_dict["head_mask"]
_UpperCAmelCase = 1
# first forward pass
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , head_mask=snake_case_ , use_cache=snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
_UpperCAmelCase = past_key_values[1]
def A__ ( A__ , A__ , A__ , A__=None , A__=None , A__=None , A__=None , A__=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
_UpperCAmelCase = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class a ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
A__ : Dict = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
A__ : Dict = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
A__ : Dict = (
{
"conversational": TFMBartForConditionalGeneration,
"feature-extraction": TFMBartModel,
"summarization": TFMBartForConditionalGeneration,
"text2text-generation": TFMBartForConditionalGeneration,
"translation": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
A__ : Union[str, Any] = True
A__ : Union[str, Any] = False
A__ : int = False
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __A ( self ) -> int:
_UpperCAmelCase = TFMBartModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case_ )
def __A ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __A ( self ) -> Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class a ( unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = [
" UN Chief Says There Is No Military Solution in Syria",
]
A__ : str = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
A__ : Dict = "facebook/mbart-large-en-ro"
@cached_property
def __A ( self ) -> Dict:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ) -> Optional[int]:
_UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __A ( self , **snake_case_ ) -> Dict:
_UpperCAmelCase = self.translate_src_text(**snake_case_ )
self.assertListEqual(self.expected_text , snake_case_ )
def __A ( self , **snake_case_ ) -> str:
_UpperCAmelCase = self.tokenizer(self.src_text , **snake_case_ , return_tensors="tf" )
_UpperCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
_UpperCAmelCase = self.tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
return generated_words
@slow
def __A ( self ) -> Optional[int]:
self._assert_generated_batch_equal_expected()
| 579
| 0
|
"""simple docstring"""
import numpy as np
def lowercase__ ( lowerCamelCase, lowerCamelCase ):
return np.where(vector > 0, lowerCamelCase, (alpha * (np.exp(lowerCamelCase ) - 1)) )
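

# A quick hand-computed sanity check (added for illustration): ELU keeps
# positive inputs unchanged and maps x <= 0 to alpha * (exp(x) - 1).
#
# >>> exponential_linear_unit(np.array([2.0, 0.0]), alpha=1.0)
# array([2., 0.])

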
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 621
|
"""simple docstring"""
def lowercase__ ( lowerCamelCase, lowerCamelCase ):
return abs(lowerCamelCase ) if a == 0 else greatest_common_divisor(b % a, lowerCamelCase )
def lowercase__ ( lowerCamelCase, lowerCamelCase ):
while y: # --> when y=0 then loop will terminate and return x as final GCD.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = y, x % y
return abs(lowerCamelCase )
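

# A short hand-checked example (added for illustration): both implementations
# agree on the same inputs.
#
# >>> greatest_common_divisor(121, 44)
# 11
# >>> gcd_by_iterative(121, 44)
# 11

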
def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
| 621
| 1
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
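

# A hedged usage sketch (illustrative values, outside the library's own tests):
# pairs are sorted by (language, text), and one language may map to several
# translations.
#
# >>> feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
# >>> feature.encode_example({"en": "the cat", "fr": ["la chatte", "le chat"], "de": "die katze"})
# {'language': ('de', 'en', 'fr', 'fr'), 'translation': ('die katze', 'the cat', 'la chatte', 'le chat')}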
| 710
|
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
| 685
| 0
|
import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file."
    )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file."
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`."
    )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup."
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times."
    )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False."
    )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub."
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    """simple docstring"""
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    """simple docstring"""
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
| 604
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
__lowerCAmelCase = nn.Parameter(_lowerCAmelCase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
__lowerCAmelCase = nn.Parameter(_lowerCAmelCase )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# set torch weights for 1-to-1 comparison
__lowerCAmelCase = np.asarray(weights[0] )
__lowerCAmelCase = np.asarray(weights[1] )
__lowerCAmelCase = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(_lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _lowerCAmelCase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(_lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _lowerCAmelCase ) , )
set_param(
torch_layer.output.dense , torch.tensor(_lowerCAmelCase ).view(-1 , _lowerCAmelCase ).contiguous().transpose(0 , 1 ) , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# set torch weights for 1-to-1 comparison
__lowerCAmelCase = np.asarray(weights[0] )
__lowerCAmelCase = np.asarray(weights[1] )
__lowerCAmelCase = np.asarray(weights[2] )
__lowerCAmelCase = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(_lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _lowerCAmelCase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(_lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _lowerCAmelCase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(_lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _lowerCAmelCase ) , )
set_param(
torch_layer.output.dense , torch.tensor(_lowerCAmelCase ).view(-1 , _lowerCAmelCase ).contiguous().transpose(0 , 1 ) , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# layernorm 1
__lowerCAmelCase = weights[0][0][0]
__lowerCAmelCase = np.asarray(layer_norm_a[0] )
__lowerCAmelCase = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(_lowerCAmelCase ) , torch.tensor(_lowerCAmelCase ) , )
# lsh weights + output
__lowerCAmelCase = weights[0][1]
if len(_lowerCAmelCase ) < 4:
set_layer_weights_in_torch_lsh(_lowerCAmelCase , torch_block.attention , _lowerCAmelCase )
else:
set_layer_weights_in_torch_local(_lowerCAmelCase , torch_block.attention , _lowerCAmelCase )
# intermediate weighs
__lowerCAmelCase = weights[2][0][1][2]
# Chunked Feed Forward
if len(_lowerCAmelCase ) == 4:
__lowerCAmelCase = intermediate_weights[2]
# layernorm 2
__lowerCAmelCase = np.asarray(intermediate_weights[0][0] )
__lowerCAmelCase = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(_lowerCAmelCase ) , torch.tensor(_lowerCAmelCase ) , )
# intermediate dense
__lowerCAmelCase = np.asarray(intermediate_weights[1][0] )
__lowerCAmelCase = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(_lowerCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_lowerCAmelCase ) , )
# intermediate out
__lowerCAmelCase = np.asarray(intermediate_weights[4][0] )
__lowerCAmelCase = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(_lowerCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_lowerCAmelCase ) , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# reformer model
__lowerCAmelCase = torch_model.reformer
# word embeds
__lowerCAmelCase = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(_lowerCAmelCase ) , )
if isinstance(weights[3] , _lowerCAmelCase ):
__lowerCAmelCase = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__lowerCAmelCase = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
__lowerCAmelCase = nn.Parameter(torch.tensor(_lowerCAmelCase ) )
__lowerCAmelCase = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
_lowerCAmelCase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__lowerCAmelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# output layer norm
__lowerCAmelCase = np.asarray(weights[7][0] )
__lowerCAmelCase = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(_lowerCAmelCase ) , torch.tensor(_lowerCAmelCase ) , )
# output embeddings
__lowerCAmelCase = np.asarray(weights[9][0] )
__lowerCAmelCase = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(_lowerCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_lowerCAmelCase ) , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# Initialise PyTorch model
__lowerCAmelCase = ReformerConfig.from_json_file(_lowerCAmelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
__lowerCAmelCase = ReformerModelWithLMHead(_lowerCAmelCase )
with open(_lowerCAmelCase , """rb""" ) as f:
__lowerCAmelCase = pickle.load(_lowerCAmelCase )["""weights"""]
set_model_weights_in_torch(_lowerCAmelCase , _lowerCAmelCase , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 465
| 0
|
"""simple docstring"""
UpperCAmelCase = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 700
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__A : List[str] = VQModel
__A : Any = "sample"
@property
def __snake_case ( self : int , snake_case__ : int=(3_2, 3_2) ):
'''simple docstring'''
lowercase :Optional[int] = 4
lowercase :Tuple = 3
lowercase :List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case__ )
return {"sample": image}
@property
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return (3, 3_2, 3_2)
@property
def __snake_case ( self : List[str] ):
'''simple docstring'''
return (3, 3_2, 3_2)
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[int] = {
'''block_out_channels''': [3_2, 6_4],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowercase :Dict = self.dummy_input
return init_dict, inputs_dict
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase , lowercase :Optional[int] = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(snake_case__ )
lowercase :List[str] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Optional[int] = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(snake_case__ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowercase :List[Any] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
lowercase :int = image.to(snake_case__ )
with torch.no_grad():
lowercase :str = model(snake_case__ ).sample
lowercase :List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowercase :Tuple = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
| 475
| 0
|
"""simple docstring"""
def A_ ( snake_case__ = 3 , snake_case__ = 7 , snake_case__ = 1_00_00_00 ) -> Tuple:
_UpperCamelCase :str = 0
_UpperCamelCase :Optional[int] = 1
for current_denominator in range(1 , limit + 1 ):
_UpperCamelCase :List[str] = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
_UpperCamelCase :int = current_numerator
_UpperCamelCase :Dict = current_denominator
return max_numerator
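

# A hedged worked example (added for illustration): with the defaults this
# searches for the largest reduced fraction n/d < 3/7 with d <= 1_000_000 and
# returns its numerator (Project Euler 71). A tiny hand-checkable case:
#
# >>> solution(numerator=1, denominator=2, limit=3)
# 1
#
# since 1/3 is the largest fraction below 1/2 with denominator at most 3.

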
if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
| 355
|
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class lowerCamelCase ( _UpperCAmelCase ):
def a_ ( self ):
UpperCamelCase : int = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def a_ ( self ):
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : int = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def a_ ( self ):
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def a_ ( self ):
UpperCamelCase : List[Any] = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def a_ ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCamelCase : List[str] = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def a_ ( self ):
UpperCamelCase : str = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def a_ ( self ):
UpperCamelCase : int = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def a_ ( self ):
UpperCamelCase : Tuple = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def a_ ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCamelCase : Tuple = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def a_ ( self ):
UpperCamelCase : str = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def a_ ( self ):
UpperCamelCase : Optional[int] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def a_ ( self ):
import PIL.Image
UpperCamelCase : Any = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=SCREAMING_SNAKE_CASE_ ) as mock_cast_to_python_objects:
UpperCamelCase : Union[str, Any] = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
UpperCamelCase , UpperCamelCase : Tuple = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , SCREAMING_SNAKE_CASE_ )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def A_ ( snake_case_ : Union[str, Any] ,snake_case_ : int ):
'''simple docstring'''
UpperCamelCase : Tuple = pa.BufferReader(snake_case_ ) if isinstance(snake_case_ ,pa.Buffer ) else pa.memory_map(snake_case_ )
UpperCamelCase : Union[str, Any] = pa.ipc.open_stream(snake_case_ )
UpperCamelCase : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" ,[None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def A_ ( snake_case_ : List[str] ,snake_case_ : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = pa.BufferOutputStream()
UpperCamelCase : Optional[Any] = pa.schema(snake_case_ ) if fields else None
with ArrowWriter(stream=snake_case_ ,schema=snake_case_ ,writer_batch_size=snake_case_ ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
UpperCamelCase , UpperCamelCase : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCamelCase : Optional[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case_ ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def A_ ( ):
'''simple docstring'''
UpperCamelCase : Any = pa.BufferOutputStream()
UpperCamelCase : Optional[Any] = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=snake_case_ ,features=snake_case_ ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
UpperCamelCase , UpperCamelCase : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
UpperCamelCase : Optional[Any] = pa.BufferReader(output.getvalue() )
UpperCamelCase : Dict = pa.ipc.open_stream(snake_case_ )
UpperCamelCase : pa.Table = f.read_all()
UpperCamelCase : str = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(snake_case_ )
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 1, 1_0] )
def A_ ( snake_case_ : Optional[Any] ):
'''simple docstring'''
UpperCamelCase : List[Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case_ ,writer_batch_size=snake_case_ ,hash_salt="""split_name""" ,check_duplicates=snake_case_ ,) as writer:
with pytest.raises(snake_case_ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} ,key=[1, 2] )
UpperCamelCase , UpperCamelCase : List[str] = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 2, 1_0] )
def A_ ( snake_case_ : Tuple ):
'''simple docstring'''
UpperCamelCase : str = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case_ ,writer_batch_size=snake_case_ ,hash_salt="""split_name""" ,check_duplicates=snake_case_ ,) as writer:
with pytest.raises(snake_case_ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} ,key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} ,key=1_0 )
UpperCamelCase , UpperCamelCase : Optional[Any] = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 2, 1_0] )
def A_ ( snake_case_ : int ):
'''simple docstring'''
UpperCamelCase : int = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case_ ,writer_batch_size=snake_case_ ,hash_salt="""split_name""" ,check_duplicates=snake_case_ ,) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} ,key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} ,key=2 )
UpperCamelCase , UpperCamelCase : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" ,[None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def A_ ( snake_case_ : Any ,snake_case_ : int ):
'''simple docstring'''
UpperCamelCase : Optional[int] = pa.BufferOutputStream()
UpperCamelCase : List[Any] = pa.schema(snake_case_ ) if fields else None
with ArrowWriter(stream=snake_case_ ,schema=snake_case_ ,writer_batch_size=snake_case_ ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
UpperCamelCase , UpperCamelCase : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCamelCase : Any = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case_ ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" ,[None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def A_ ( snake_case_ : Optional[Any] ,snake_case_ : Any ):
'''simple docstring'''
UpperCamelCase : Optional[int] = pa.BufferOutputStream()
UpperCamelCase : Optional[Any] = pa.schema(snake_case_ ) if fields else None
with ArrowWriter(stream=snake_case_ ,schema=snake_case_ ,writer_batch_size=snake_case_ ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
UpperCamelCase , UpperCamelCase : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCamelCase : List[str] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case_ ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" ,[None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def A_ ( snake_case_ : Optional[int] ,snake_case_ : Tuple ):
'''simple docstring'''
UpperCamelCase : int = pa.BufferOutputStream()
UpperCamelCase : Dict = pa.schema(snake_case_ ) if fields else None
with ArrowWriter(stream=snake_case_ ,schema=snake_case_ ,writer_batch_size=snake_case_ ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
UpperCamelCase , UpperCamelCase : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCamelCase : Optional[int] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case_ ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def A_ ( ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase : Optional[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
UpperCamelCase : str = os.path.join(snake_case_ ,"""test.arrow""" )
with ArrowWriter(path=snake_case_ ,schema=pa.schema(snake_case_ ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
UpperCamelCase , UpperCamelCase : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(snake_case_ ,metadata=writer._schema.metadata )
_check_output(snake_case_ ,1 )
def A_ ( snake_case_ : Any ):
'''simple docstring'''
if pa.types.is_list(snake_case_ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def A_ ( snake_case_ : Optional[int] ,snake_case_ : Optional[Any] ):
'''simple docstring'''
if isinstance(lst[0] ,snake_case_ ):
change_first_primitive_element_in_list(lst[0] ,snake_case_ )
else:
UpperCamelCase : Optional[Any] = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" ,[(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def A_ ( snake_case_ : Dict ,snake_case_ : Tuple ,snake_case_ : Tuple ):
'''simple docstring'''
UpperCamelCase : int = pa.array(TypedSequence(snake_case_ ,optimized_int_type=snake_case_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" ,[
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] ,)
@pytest.mark.parametrize("""sequence""" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def A_ ( snake_case_ : Any ,snake_case_ : Any ,snake_case_ : List[Any] ):
'''simple docstring'''
# in range
UpperCamelCase : Union[str, Any] = pa.array(OptimizedTypedSequence(snake_case_ ,col=snake_case_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
UpperCamelCase : Dict = copy.deepcopy(snake_case_ )
UpperCamelCase : Optional[int] = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(snake_case_ ,snake_case_ )
UpperCamelCase : Any = pa.array(OptimizedTypedSequence(snake_case_ ,col=snake_case_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" ,[False, True] )
def A_ ( snake_case_ : Union[str, Any] ,snake_case_ : List[str] ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=snake_case_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def A_ ( snake_case_ : Any ):
'''simple docstring'''
UpperCamelCase : Tuple = """mock://dataset-train.arrow"""
with ArrowWriter(path=snake_case_ ,storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs ,type(snake_case_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
UpperCamelCase , UpperCamelCase : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(snake_case_ )
def A_ ( ):
'''simple docstring'''
UpperCamelCase : Dict = pa.BufferOutputStream()
with ParquetWriter(stream=snake_case_ ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
UpperCamelCase , UpperCamelCase : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
UpperCamelCase : Union[str, Any] = pa.BufferReader(output.getvalue() )
UpperCamelCase : pa.Table = pq.read_table(snake_case_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" ,[False, True] )
def A_ ( snake_case_ : Tuple ,snake_case_ : List[str] ):
'''simple docstring'''
import PIL.Image
UpperCamelCase : Tuple = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) ,dtype=np.uinta ) ).save(snake_case_ ,format="""png""" )
UpperCamelCase : Dict = pa.BufferOutputStream()
with ParquetWriter(
stream=snake_case_ ,features=Features({"""image""": Image()} ) ,embed_local_files=snake_case_ ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
UpperCamelCase : Any = pa.BufferReader(output.getvalue() )
UpperCamelCase : pa.Table = pq.read_table(snake_case_ )
UpperCamelCase : Any = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] ,snake_case_ )
with open(snake_case_ ,"""rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def A_ ( ):
'''simple docstring'''
UpperCamelCase : Any = pa.schema([pa.field("""col_1""" ,pa.string() ,nullable=snake_case_ )] )
UpperCamelCase : Optional[int] = pa.BufferOutputStream()
with ArrowWriter(stream=snake_case_ ) as writer:
writer._build_writer(inferred_schema=snake_case_ )
assert writer._schema == pa.schema([pa.field("""col_1""" ,pa.string() )] )
| 499
| 0
|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase :
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=3 , UpperCamelCase=16 , UpperCamelCase=[1, 2, 1] , UpperCamelCase=[2, 2, 4] , UpperCamelCase=2 , UpperCamelCase=2.0 , UpperCamelCase=True , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.1 , UpperCamelCase="gelu" , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=0.02 , UpperCamelCase=1e-5 , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=10 , UpperCamelCase=8 , ):
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = embed_dim
_SCREAMING_SNAKE_CASE = depths
_SCREAMING_SNAKE_CASE = num_heads
_SCREAMING_SNAKE_CASE = window_size
_SCREAMING_SNAKE_CASE = mlp_ratio
_SCREAMING_SNAKE_CASE = qkv_bias
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = drop_path_rate
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = use_absolute_embeddings
_SCREAMING_SNAKE_CASE = patch_norm
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = scope
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = encoder_stride
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def lowercase ( self ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = SwinvaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCamelCase )
_SCREAMING_SNAKE_CASE = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_SCREAMING_SNAKE_CASE = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = SwinvaForMaskedImageModeling(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = SwinvaForMaskedImageModeling(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = self.type_sequence_label_size
_SCREAMING_SNAKE_CASE = SwinvaForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
a : Tuple = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
a : Optional[Any] = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
a : Any = False
a : Union[str, Any] = False
a : List[Any] = False
a : List[str] = False
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = SwinvaModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCamelCase , embed_dim=37 )
def lowercase ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def lowercase ( self ):
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def lowercase ( self ):
pass
def lowercase ( self ):
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(UpperCamelCase )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 493
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
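    # Example of the unrolling above: with loader_batch_size=8 and a model output
    # {"logits": tensor of shape (8, n)}, each loader_batch_item() call slices out
    # one row and restores a batch dimension, so callers always see batch_size=1.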
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and been fully consumed.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism,
        # BUT we have an extra required item, which is the presence of `is_last`.
        # That is because everything is flattened by `PipelineChunkIterator`, and
        # we need to keep track of the original `process` boundaries here, so
        # that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last`, and then just passes the accumulated list on to the caller.
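        # For example, if the chunk iterator yields items tagged
        # {"is_last": False}, {"is_last": False}, {"is_last": True}, this call
        # regroups all three into a single accumulator list before returning.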
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
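# Minimal usage sketch (the dataset and column names below are illustrative):
#
#   from datasets import load_dataset
#   dataset = load_dataset("imdb", split="test")
#   for output in pipe(KeyDataset(dataset, "text")):
#       print(output)
#
# KeyDataset lets a pipeline stream one column of a dataset; KeyPairDataset does
# the same for sentence-pair tasks via the "text"/"text_pair" convention.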
| 493
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """Configuration for dataset downloads (field names follow `datasets.DownloadConfig`)."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
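# Usage sketch: `copy()` deep-copies every field, so mutating the clone leaves
# the original untouched:
#   config = DownloadConfig(max_retries=3)
#   clone = config.copy()
#   clone.max_retries = 5  # config.max_retries is still 3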
| 639
| 1
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 721
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 201
| 0
|
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
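# e.g. running `RUN_SLOW=yes python -m pytest tests/` makes `_run_slow_tests`
# below True, which un-skips tests decorated with `@slow`.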
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
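# Usage sketch for `for_all_test_methods` (the class below is illustrative):
# stack several requirement decorators onto every `test_*` method at once:
#
#   @for_all_test_methods(require_torch, slow)
#   class MyHeavyTests(unittest.TestCase):
#       ...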
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
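# Usage sketch (the command line below is illustrative):
#   execute_subprocess_async(["python", "-c", "print('hello')"], env=os.environ.copy())
# raises RuntimeError if the child exits non-zero or produces no output at all.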
def pytest_xdist_worker_id():
    # numerical id of the current pytest-xdist worker ("gw0" -> 0); 0 when xdist isn't used
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    # base port plus a per-worker delta so concurrent xdist workers don't collide
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 24
|
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 298
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = "\\n@article{wang2019superglue,\n    title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n    author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n    journal={arXiv preprint arXiv:1905.00537},\n    year={2019}\n}\n"
_DESCRIPTION = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_KWARGS_DESCRIPTION = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n    predictions: list of predictions to score. Depending on the SuperGlUE subset:\n        - for 'record': list of question-answer dictionaries with the following keys:\n            - 'idx': index of the question as specified by the dataset\n            - 'prediction_text': the predicted answer text\n        - for 'multirc': list of question-answer dictionaries with the following keys:\n            - 'idx': index of the question-answer pair as specified by the dataset\n            - 'prediction': the predicted answer label\n        - otherwise: list of predicted labels\n    references: list of reference labels. Depending on the SuperGLUE subset:\n        - for 'record': list of question-answers dictionaries with the following keys:\n            - 'idx': index of the question as specified by the dataset\n            - 'answers': list of possible answers\n        - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n    - for 'record':\n        - 'exact_match': Exact match between answer and gold answer\n        - 'f1': F1 score\n    - for 'multirc':\n        - 'exact_match': Exact match between answer and gold answer\n        - 'f1_m': Per-question macro-F1 score\n        - 'f1_a': Average F1 score over all answers\n    - for 'axb':\n        'matthews_correlation': Matthew Correlation\n    - for 'cb':\n        - 'accuracy': Accuracy\n        - 'f1': F1 score\n    - for all others:\n        - 'accuracy': Accuracy\nExamples:\n\n    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0}\n\n    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0, 'f1': 1.0}\n\n    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n    >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 1.0, 'f1': 1.0}\n\n    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n    >>> references = [0, 1]\n    >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
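# Reading the three MultiRC numbers: `exact_match` is the fraction of questions
# whose full answer set was predicted exactly, `f1_m` averages a per-question
# macro-F1, and `f1_a` is one F1 computed over all answer options pooled together.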
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 203
| 0
|
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
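# Why this works: P = k(3k - 1)/2 inverts to k = (1 + sqrt(1 + 24P)) / 6, so a
# number P is pentagonal exactly when that k comes out as a positive integer.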
def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 523
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
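# e.g. sigmoid(np.array([0.0, 2.0])) -> array([0.5, 0.88079708]); every real
# input is squashed into the open interval (0, 1).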
if __name__ == "__main__":
import doctest
doctest.testmod()
| 523
| 1
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                # LayoutLM-style boxes live on a 0-1000 grid; rescale to pixels.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
lowercase : Union[str, Any] =box.int().tolist()
lowercase : Optional[Any] ={
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
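# A minimal usage sketch (not part of the module itself; the checkpoint name is
# just a well-known public detection model, not something this file mandates):
# the pipeline returns a list of {"score", "label", "box"} dicts per image.
#
#     from transformers import pipeline
#
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     print(detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9))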
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    # Compute C(n, r) row by row with Pascal's rule, using only O(r) space.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
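# Sanity check: C(10, 5) = 252, so the print above is expected to output 252.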
'''simple docstring'''
import argparse

import requests
import torch
from PIL import Image

from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor


def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # Split the fused qkv projection into separate query/key/value tensors.
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
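# Illustrative invocation (the script file name and output directory are
# placeholders, not taken from the original source):
#
#     python convert_vit_mae_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#         --pytorch_dump_folder_path ./vit-mae-base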
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
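# Illustrative CLI usage (this subcommand is deprecated in favor of
# `add-new-model-like`, as the warning above says):
#
#     transformers-cli add-new-model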
import random
def _partition(data, pivot):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
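# Example (illustrative): the median of an odd-length list is the element at
# index len(items) // 2 of the sorted order, so quick_select([2, 7, 1, 9, 5], 2)
# is expected to return 5 (sorted order: [1, 2, 5, 7, 9]).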
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
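# The failure array lets the search fall back without re-examining matched
# text characters, so kmp() runs in O(len(text) + len(pattern)) rather than
# the naive O(len(text) * len(pattern)).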
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        # Pick two random indices and swap their elements in place.
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
'''simple docstring'''
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
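# Illustrative invocation (the script file name is hypothetical):
#
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin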
'''simple docstring'''
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
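# A minimal usage sketch: extracting text nodes and their xpaths from a small
# HTML snippet (the snippet below is illustrative, not from the original file).
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#     print(encoding["nodes"])   # [['Hello world']]
#     print(encoding["xpaths"])  # [['/html/body/p']]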
'''simple docstring'''
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
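# Derivation of the loop body: from a + b + c = n and a^2 + b^2 = c^2,
# substituting c = n - a - b gives b = (n^2 - 2*a*n) / (2*n - 2*a); the
# integer division plus the c*c == a*a + b*b check filters out non-integral
# candidates.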
import math
def sieve(n: int) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # Classic sieve on the first segment [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # Sieve the remaining range in segments of size sqrt(n).
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
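# Sanity check (illustrative): there are 78498 primes below 10**6, so
# len(sieve(10**6)) is expected to equal 78498.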
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for ref, pred in zip(references, predictions):
            n_correct += 1.0 if math_equivalence.is_equiv(ref, pred) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    # Entropy of a pre-softmax logit tensor.
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output


@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top, "
    "also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
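# How the early exit works, in brief: DeeBERT attaches one "highway" classifier
# per encoder layer. At inference time, if a layer's highway-logit entropy falls
# below that layer's threshold (set via set_early_exit_entropy), the encoder
# raises HighwayException, which carries the early logits and the exit layer
# index up to the classification head instead of running the remaining layers.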
'''simple docstring'''
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
"""simple docstring"""
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
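# A small worked example (illustrative): the determinant of [[1, 2], [3, 4]]
# is 1*4 - 2*3 = -2, and matrix-vector multiplication sums each row.
#
#     m = Matrix([[1, 2], [3, 4]], 2, 2)
#     print(m.determinant())      # -2
#     print(m * Vector([1, 1]))   # (3,7)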
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
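# A minimal subclass sketch (the command name and body are illustrative only,
# not an existing implementation): register a parser, bind a factory, do work.
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello from a CLI subcommand")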
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _lowerCamelCase( a , a , a , a ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def _lowerCamelCase( a , a , a , a , a=True ):
model.train()
__a = model(a )
__a = F.mse_loss(a , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(a )
def _lowerCamelCase( a , a=False ):
set_seed(4_2 )
__a = RegressionModel()
__a = deepcopy(a )
__a = RegressionDataset(length=8_0 )
__a = DataLoader(a , batch_size=1_6 )
model.to(accelerator.device )
if sched:
__a = AdamW(params=model.parameters() , lr=1E-3 )
__a = AdamW(params=ddp_model.parameters() , lr=1E-3 )
__a = LambdaLR(a , lr_lambda=lambda a : epoch**0.65 )
__a = LambdaLR(a , lr_lambda=lambda a : epoch**0.65 )
# Make a copy of `model`
if sched:
__a , __a , __a , __a = accelerator.prepare(a , a , a , a )
else:
__a , __a = accelerator.prepare(a , a )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def test_noop_sync( accelerator ):
    # Test when on a single CPU or GPU that the context manager does nothing
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targets for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync( accelerator ):
    # Test on distributed setup that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targets for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
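# The core pattern both sync tests above exercise, reduced to a sketch
# (`run_step` is a hypothetical stand-in for step_model): inside
# `accelerator.no_sync(model)` gradients accumulate locally with no
# cross-process all-reduce; a step taken outside the context syncs them.
def _no_sync_sketch(accelerator, model, batches, run_step):
    *local_batches, last_batch = batches
    for batch in local_batches:
        with accelerator.no_sync(model):  # gradient all-reduce is skipped here
            run_step(model, batch)
    run_step(model, last_batch)  # the final micro-batch triggers the sync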
def test_gradient_accumulation( split_batches=False , dispatch_batches=False ):
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targets for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
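# The user-facing pattern the test above validates, reduced to a minimal
# sketch (`loss_fn` and the prepared `opt` are illustrative assumptions, not
# names from this file):
def _accumulate_sketch(accelerator, model, opt, dataloader, loss_fn):
    for batch in dataloader:
        inputs, targets = batch.values()  # same batch layout as above
        with accelerator.accumulate(model):
            loss = loss_fn(model(inputs), targets)
            accelerator.backward(loss)
            opt.step()       # a prepared optimizer's step is skipped until a true sync step is due
            opt.zero_grad()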
def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False , dispatch_batches=False ):
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targets for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model , input , target , accelerator , False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration )
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=8_0 )
    first_dataloader = DataLoader(first_dset , batch_size=1_6 )
    second_dset = RegressionDataset(length=9_6 )
    second_dataloader = DataLoader(second_dset , batch_size=1_6 )
    first_dataloader , second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation(split_batch , dispatch_batches )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 706
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = encoder_seq_length
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = d_ff
__a = relative_attention_num_buckets
__a = dropout_rate
__a = initializer_factor
__a = eos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = None
__a = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if attention_mask is None:
__a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
__a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
__a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
__a = input_ids.clamp(self.pad_token_id + 1 )
__a = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a = self.get_config()
__a = config.num_attention_heads
__a = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
__a = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__a = result.last_hidden_state
__a = result.past_key_values
__a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
__a = model(**lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
__a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
__a = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def a__ ( self ):
__a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__a = self.model_tester.prepare_config_and_inputs()
__a = config_and_inputs[0]
__a = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
__a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
__a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
__a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def a__ ( self ):
__a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowerCamelCase ).to(lowerCamelCase )
__a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowerCamelCase , legacy=lowerCamelCase )
__a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__a = tokenizer(lowerCamelCase , return_tensors="pt" , padding=lowerCamelCase ).input_ids
# fmt: off
__a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase , lowerCamelCase )
__a = model.generate(input_ids.to(lowerCamelCase ) )
__a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
| 67
| 0
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _A :
'''simple docstring'''
def _snake_case ( self : Tuple , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] ):
'''simple docstring'''
return None
class _A :
'''simple docstring'''
def _snake_case ( self : Any , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Dict , lowerCamelCase : Any ):
'''simple docstring'''
return None
class _A ( unittest.TestCase ):
'''simple docstring'''
_snake_case : Tuple = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _snake_case ( self : str ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase , "tf" , 12 , **lowerCamelCase )
@require_torch
@slow
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase , "pt" , 12 , **lowerCamelCase )
@require_torch
@slow
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
from transformers import BertModel
__lowercase = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(lowerCamelCase ) )
vocab_file.flush()
__lowercase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__lowercase = BertModel(BertConfig(vocab_size=len(lowerCamelCase ) ) )
model.save_pretrained(lowerCamelCase )
self._test_export(lowerCamelCase , "pt" , 12 , lowerCamelCase )
@require_tf
@slow
def _snake_case ( self : Any ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__lowercase = self._test_export(lowerCamelCase , "tf" , 12 , **lowerCamelCase )
__lowercase = quantize(Path(lowerCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def _snake_case ( self : str ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__lowercase = self._test_export(lowerCamelCase , "pt" , 12 , **lowerCamelCase )
__lowercase = quantize(lowerCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def _snake_case ( self : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : List[str]=None , **lowerCamelCase : Optional[int] ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
__lowercase = Path(lowerCamelCase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase )
return path
except Exception as e:
self.fail(lowerCamelCase )
@require_torch
@require_tokenizers
@slow
def _snake_case ( self : List[str] ):
'''simple docstring'''
from transformers import BertModel
__lowercase = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
__lowercase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowerCamelCase , lowerCamelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def _snake_case ( self : Dict ):
'''simple docstring'''
from transformers import TFBertModel
__lowercase = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
__lowercase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowerCamelCase , lowerCamelCase , "tf" )
def _snake_case ( self : Any , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Dict ):
'''simple docstring'''
__lowercase = FeatureExtractionPipeline(lowerCamelCase , lowerCamelCase )
__lowercase = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
__lowercase , __lowercase , __lowercase , __lowercase = infer_shapes(lowerCamelCase , lowerCamelCase )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowerCamelCase )
self.assertSequenceEqual(variable_names[3:] , lowerCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = ["input_ids", "attention_mask", "token_type_ids"]
__lowercase = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
__lowercase , __lowercase = ensure_valid_input(FuncContiguousArgs() , lowerCamelCase , lowerCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase ) , set(lowerCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
__lowercase , __lowercase = ensure_valid_input(FuncNonContiguousArgs() , lowerCamelCase , lowerCamelCase )
# Should have exactly one arg (everything before the unprovided "some_other_args")
self.assertEqual(len(lowerCamelCase ) , 1 )
self.assertEqual(len(lowerCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
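# For reference, the export path driven by _test_export above boils down to a
# single call (a sketch; the positional argument order mirrors the call made
# inside _test_export and should be treated as an assumption):
# from pathlib import Path
# from transformers.convert_graph_to_onnx import convert
# convert("pt", "bert-base-cased", Path("/tmp/bert/model.onnx"), 12, None)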
| 402
|
def equation ( x ):
    return 1_0 - x * x
def bisection ( a , b ):
    # Bolzano's theorem: a sign change of `equation` over [a, b] guarantees a root in between
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong space!" )
    c = a
    while (b - a) >= 0.0_1:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
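# Sanity check for the calls above: equation(-2) = 6 and equation(5) = -15
# bracket a sign change, so bisection(-2, 5) converges toward sqrt(10),
# about 3.162; the loop halves [a, b] until its width drops below 0.01.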
| 402
| 1
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = AudioLDMPipeline
lowercase__ = TEXT_TO_AUDIO_PARAMS
lowercase__ = TEXT_TO_AUDIO_BATCH_PARAMS
lowercase__ = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=(32, 64), class_embed_type='''simple_projection''', projection_class_embeddings_input_dim=32, class_embeddings_concat=lowerCamelCase, )
lowercase__ = DDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=lowerCamelCase, set_alpha_to_one=lowerCamelCase, )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
lowercase__ = ClapTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, projection_dim=32, )
lowercase__ = ClapTextModelWithProjection(lowerCamelCase )
lowercase__ = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''', model_max_length=77 )
lowercase__ = SpeechTaHifiGanConfig(
model_in_dim=8, sampling_rate=16_000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=lowerCamelCase, )
lowercase__ = SpeechTaHifiGan(lowerCamelCase )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : Optional[Any]=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(lowerCamelCase )
else:
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 256
lowercase__ = audio[:10]
lowercase__ = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * [inputs['''prompt''']]
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * [inputs.pop('''prompt''' )]
lowercase__ = audioldm_pipe.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = text_inputs['''input_ids'''].to(lowerCamelCase )
lowercase__ = audioldm_pipe.text_encoder(
lowerCamelCase, )
lowercase__ = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase__ = F.normalize(lowerCamelCase, dim=-1 )
lowercase__ = prompt_embeds
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * ['''this is a negative prompt''']
lowercase__ = negative_prompt
lowercase__ = 3 * [inputs['''prompt''']]
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * [inputs.pop('''prompt''' )]
lowercase__ = []
for p in [prompt, negative_prompt]:
lowercase__ = audioldm_pipe.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = text_inputs['''input_ids'''].to(lowerCamelCase )
lowercase__ = audioldm_pipe.text_encoder(
lowerCamelCase, )
lowercase__ = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase__ = F.normalize(lowerCamelCase, dim=-1 )
embeds.append(lowerCamelCase )
lowercase__ , lowercase__ = embeds
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = '''egg cracking'''
lowercase__ = audioldm_pipe(**lowerCamelCase, negative_prompt=lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 256
lowercase__ = audio[:10]
lowercase__ = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowercase__ = 2
lowercase__ = audioldm_pipe([prompt] * batch_size, num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
lowercase__ = 2
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=2, num_waveforms_per_prompt=lowerCamelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
lowercase__ = 2
lowercase__ = audioldm_pipe(
[prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=lowerCamelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = audioldm_pipe.vocoder.config.sampling_rate
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = audioldm_pipe(audio_length_in_s=0.016, **lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) / vocoder_sampling_rate == 0.016
lowercase__ = audioldm_pipe(audio_length_in_s=0.032, **lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) / vocoder_sampling_rate == 0.032
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = ['''hey''']
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=1 )
lowercase__ = output.audios.shape
assert audio_shape == (1, 256)
lowercase__ = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowercase__ = SpeechTaHifiGan(lowerCamelCase ).to(lowerCamelCase )
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=1 )
lowercase__ = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def lowercase__ ( self : int ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase )
def lowercase__ ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase )
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[Any]="cpu", lowerCamelCase : List[Any]=torch.floataa, lowerCamelCase : List[str]=0 ):
'''simple docstring'''
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = np.random.RandomState(lowerCamelCase ).standard_normal((1, 8, 128, 16) )
lowercase__ = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase, dtype=lowerCamelCase )
lowercase__ = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_inputs(lowerCamelCase )
lowercase__ = 25
lowercase__ = audioldm_pipe(**lowerCamelCase ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 81_920
lowercase__ = audio[77_230:77_240]
lowercase__ = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
lowercase__ = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowercase__ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_inputs(lowerCamelCase )
lowercase__ = audioldm_pipe(**lowerCamelCase ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 81_920
lowercase__ = audio[27_780:27_790]
lowercase__ = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
lowercase__ = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
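# Note on the waveform-length assertions in the tests above: the dummy vocoder
# runs at 16_000 Hz, so audio_length_in_s=0.016 maps to 0.016 * 16_000 = 256
# samples and 0.032 maps to 512, which is what len(audio) is checked against.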
| 701
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], A__, module_spec=__spec__)
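# With this lazy pattern, `import transformers.models.unispeech` stays cheap:
# _LazyModule records the structure held in A__ and only imports
# modeling_unispeech (and torch with it) on first attribute access, e.g. when
# `UniSpeechModel` is actually looked up.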
| 671
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
A_ = None
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
A_ = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
A_ = "▁"
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase : str = VOCAB_FILES_NAMES
__lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Tuple = ["input_ids", "token_type_ids"]
__lowerCamelCase : str = FNetTokenizer
def __init__( self: Dict , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Any=False , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple="<unk>" , UpperCamelCase_: Union[str, Any]="[SEP]" , UpperCamelCase_: str="<pad>" , UpperCamelCase_: Any="[CLS]" , UpperCamelCase_: Tuple="[MASK]" , **UpperCamelCase_: int , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
UpperCamelCase_ =(
AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ , normalized=UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
else mask_token
)
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase_ =do_lower_case
UpperCamelCase_ =remove_space
UpperCamelCase_ =keep_accents
UpperCamelCase_ =vocab_file
UpperCamelCase_ =False if not self.vocab_file else True
def UpperCamelCase__ ( self: Dict , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
UpperCamelCase_ =[self.sep_token_id]
UpperCamelCase_ =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
UpperCamelCase_ =[self.sep_token_id]
UpperCamelCase_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase_ =os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
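# Example of the two sequence-pair helpers above (a sketch with toy ids):
# for token_ids_a=[5, 6] and token_ids_b=[7], the special-token builder
# returns [cls, 5, 6, sep, 7, sep], and the matching token type ids are
# [0, 0, 0, 0, 1, 1] (segment 0 covers cls + a + sep, segment 1 covers b + sep).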
| 391
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
set_seed(770)
A_ = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
A_ = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
A_ = os.path.dirname(os.path.abspath(__file__))
A_ = os.path.join(os.path.expanduser("~"), ".cache")
A_ = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _UpperCamelCase ( A , A=False ):
UpperCamelCase_ =model_type
if use_small:
key += "_small"
return os.path.join(A , REMOTE_MODEL_PATHS[key]["file_name"] )
def _UpperCamelCase ( A , A ):
os.makedirs(A , exist_ok=A )
hf_hub_download(repo_id=A , filename=A , local_dir=A )
def _UpperCamelCase ( A , A , A=False , A="text" ):
if model_type == "text":
UpperCamelCase_ =BarkSemanticModel
UpperCamelCase_ =BarkSemanticConfig
UpperCamelCase_ =BarkSemanticGenerationConfig
elif model_type == "coarse":
UpperCamelCase_ =BarkCoarseModel
UpperCamelCase_ =BarkCoarseConfig
UpperCamelCase_ =BarkCoarseGenerationConfig
elif model_type == "fine":
UpperCamelCase_ =BarkFineModel
UpperCamelCase_ =BarkFineConfig
UpperCamelCase_ =BarkFineGenerationConfig
else:
raise NotImplementedError()
UpperCamelCase_ =f"""{model_type}_small""" if use_small else model_type
UpperCamelCase_ =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(A ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
UpperCamelCase_ =torch.load(A , map_location=A )
# this is a hack
UpperCamelCase_ =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
UpperCamelCase_ =model_args["vocab_size"]
UpperCamelCase_ =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
UpperCamelCase_ =model_args.pop("n_head" )
UpperCamelCase_ =model_args.pop("n_embd" )
UpperCamelCase_ =model_args.pop("n_layer" )
UpperCamelCase_ =ConfigClass(**checkpoint["model_args"] )
UpperCamelCase_ =ModelClass(config=A )
UpperCamelCase_ =GenerationConfigClass()
UpperCamelCase_ =model_generation_config
UpperCamelCase_ =checkpoint["model"]
# fixup checkpoint
UpperCamelCase_ ="_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(A ):
# replace part of the key with corresponding layer name in HF implementation
UpperCamelCase_ =k[len(A ) :]
for old_layer_name in new_layer_name_dict:
UpperCamelCase_ =new_k.replace(A , new_layer_name_dict[old_layer_name] )
UpperCamelCase_ =state_dict.pop(A )
UpperCamelCase_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
UpperCamelCase_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
UpperCamelCase_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
UpperCamelCase_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(A ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(A ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(A , strict=A )
UpperCamelCase_ =model.num_parameters(exclude_embeddings=A )
UpperCamelCase_ =checkpoint["best_val_loss"].item()
logger.info(f"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(A , 3 )} loss""" )
model.eval()
model.to(A )
del checkpoint, state_dict
return model
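# Example of the checkpoint-key fixup performed in the loop above: a raw key
# such as "_orig_mod.transformer.h.0.ln_1.weight" first loses the
# "_orig_mod." prefix and is then rewritten via new_layer_name_dict to
# "layers.0.layernorm_1.weight".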
def _UpperCamelCase ( A , A=False , A="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
UpperCamelCase_ ="cpu" # do conversion on cpu
UpperCamelCase_ =_get_ckpt_path(A , use_small=A )
UpperCamelCase_ =_load_model(A , A , model_type=A , use_small=A )
# load bark initial model
UpperCamelCase_ =_bark_load_model(A , "cpu" , model_type=A , use_small=A )
if model_type == "text":
UpperCamelCase_ =bark_model["model"]
if model.num_parameters(exclude_embeddings=A ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
UpperCamelCase_ =5
UpperCamelCase_ =10
if model_type in ["text", "coarse"]:
UpperCamelCase_ =torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
UpperCamelCase_ =bark_model(A )[0]
UpperCamelCase_ =model(A )
# take last logits
UpperCamelCase_ =output_new_model_total.logits[:, [-1], :]
else:
UpperCamelCase_ =3
UpperCamelCase_ =8
UpperCamelCase_ =torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
UpperCamelCase_ =model(A , A )
UpperCamelCase_ =bark_model(A , A )
UpperCamelCase_ =output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("initial and new outputs are not equal" )
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
def _UpperCamelCase ( A , A , A , A , A , A , ):
UpperCamelCase_ =os.path.join(A , A )
UpperCamelCase_ =BarkSemanticConfig.from_pretrained(os.path.join(A , "config.json" ) )
UpperCamelCase_ =BarkCoarseConfig.from_pretrained(os.path.join(A , "config.json" ) )
UpperCamelCase_ =BarkFineConfig.from_pretrained(os.path.join(A , "config.json" ) )
UpperCamelCase_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
UpperCamelCase_ =BarkSemanticModel.from_pretrained(A )
UpperCamelCase_ =BarkCoarseModel.from_pretrained(A )
UpperCamelCase_ =BarkFineModel.from_pretrained(A )
UpperCamelCase_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
UpperCamelCase_ =BarkConfig.from_sub_model_configs(
A , A , A , A )
UpperCamelCase_ =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
UpperCamelCase_ =BarkModel(A )
UpperCamelCase_ =semantic
UpperCamelCase_ =coarseAcoustic
UpperCamelCase_ =fineAcoustic
UpperCamelCase_ =codec
UpperCamelCase_ =bark_generation_config
Path(A ).mkdir(exist_ok=A )
bark.save_pretrained(A , repo_id=A , push_to_hub=A )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
A_ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 391
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = 'donut-swin'
lowerCAmelCase__ : Optional[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: str , __lowerCAmelCase: int=224 , __lowerCAmelCase: Optional[Any]=4 , __lowerCAmelCase: List[str]=3 , __lowerCAmelCase: Union[str, Any]=96 , __lowerCAmelCase: List[Any]=[2, 2, 6, 2] , __lowerCAmelCase: Union[str, Any]=[3, 6, 12, 24] , __lowerCAmelCase: Optional[Any]=7 , __lowerCAmelCase: Optional[int]=4.0 , __lowerCAmelCase: Optional[int]=True , __lowerCAmelCase: int=0.0 , __lowerCAmelCase: int=0.0 , __lowerCAmelCase: List[Any]=0.1 , __lowerCAmelCase: Optional[Any]="gelu" , __lowerCAmelCase: Optional[Any]=False , __lowerCAmelCase: Optional[int]=0.02 , __lowerCAmelCase: Any=1E-5 , **__lowerCAmelCase: Union[str, Any] , ) -> List[Any]:
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = embed_dim
__UpperCAmelCase = depths
__UpperCAmelCase = len(__lowerCAmelCase )
__UpperCAmelCase = num_heads
__UpperCAmelCase = window_size
__UpperCAmelCase = mlp_ratio
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = drop_path_rate
__UpperCAmelCase = hidden_act
__UpperCAmelCase = use_absolute_embeddings
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__UpperCAmelCase = int(embed_dim * 2 ** (len(__lowerCAmelCase ) - 1) )
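# Example: with the defaults above (embed_dim=96, depths=[2, 2, 6, 2]) the
# derived hidden_size is 96 * 2 ** 3 = 768, i.e. the channel width after the
# final of the four Swin stages.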
| 286
|
from ..utils import DummyObject, requires_backends


class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                "input_ids": [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
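
# A note on the "\u0120" character used in the fixture above: it renders as
# "Ġ" and is the byte-level BPE convention (inherited from GPT-2) for a token
# that begins with a space, e.g.
#
#     >>> "\u0120lowest"
#     'Ġlowest'    # the BPE token for " lowest"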
def bead_sort(sequence: list) -> list:
    """Bead sort: only defined for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the excess beads "fall" from the upper rod onto the lower one
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
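
if __name__ == "__main__":
    # Extra spot check (illustrative): bead sort agrees with the built-in sort
    # on non-negative integers; it is not defined for negatives or floats.
    import random

    data = [random.randrange(100) for _ in range(20)]
    assert bead_sort(list(data)) == sorted(data)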
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def lowerCamelCase (a_ :List[str] , a_ :Union[str, Any] , a_ :Dict=None , a_ :str=None) -> Union[str, Any]:
if attention_mask is None:
lowercase :Optional[int] = tf.cast(tf.math.not_equal(a_ , config.pad_token_id) , tf.inta)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __magic_name__ :
__A : int = OPTConfig
__A : Union[str, Any] = {}
__A : Tuple = "gelu"
def __init__( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : str=1_3 , snake_case__ : Optional[int]=7 , snake_case__ : Tuple=True , snake_case__ : Any=False , snake_case__ : str=9_9 , snake_case__ : Any=1_6 , snake_case__ : List[str]=2 , snake_case__ : List[Any]=4 , snake_case__ : Any=4 , snake_case__ : Any="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : Any=0.1 , snake_case__ : Union[str, Any]=2_0 , snake_case__ : List[str]=2 , snake_case__ : Optional[int]=1 , snake_case__ : Any=0 , snake_case__ : Tuple=1_6 , snake_case__ : List[str]=1_6 , ):
'''simple docstring'''
lowercase :str = parent
lowercase :Tuple = batch_size
lowercase :List[str] = seq_length
lowercase :Any = is_training
lowercase :Union[str, Any] = use_labels
lowercase :Optional[int] = vocab_size
lowercase :List[Any] = hidden_size
lowercase :int = num_hidden_layers
lowercase :List[str] = num_attention_heads
lowercase :Tuple = intermediate_size
lowercase :str = hidden_act
lowercase :Tuple = hidden_dropout_prob
lowercase :Dict = attention_probs_dropout_prob
lowercase :str = max_position_embeddings
lowercase :Optional[int] = eos_token_id
lowercase :Tuple = pad_token_id
lowercase :Union[str, Any] = bos_token_id
lowercase :Dict = embed_dim
lowercase :Optional[Any] = word_embed_proj_dim
lowercase :List[Any] = False
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase :Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase :List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase :Any = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=snake_case__ , **self.config_updates , )
lowercase :str = prepare_opt_inputs_dict(snake_case__ , snake_case__ )
return config, inputs_dict
def __snake_case ( self : str , snake_case__ : Dict , snake_case__ : Any ):
'''simple docstring'''
lowercase :Any = TFOPTModel(config=snake_case__ )
lowercase :List[Any] = inputs_dict['''input_ids''']
lowercase :Optional[Any] = input_ids[:1, :]
lowercase :List[str] = inputs_dict['''attention_mask'''][:1, :]
lowercase :List[Any] = 1
# first forward pass
lowercase :Optional[Any] = model(snake_case__ , attention_mask=snake_case__ , use_cache=snake_case__ )
lowercase , lowercase :int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase :str = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase :Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowercase :List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase :int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase :Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ )[0]
lowercase :Dict = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase :List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase :Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
lowercase :Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 )
@require_tf
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__A : int = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__A : Optional[int] = (TFOPTForCausalLM,) if is_tf_available() else ()
__A : List[str] = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
__A : Tuple = False
__A : Optional[Any] = False
__A : int = False
__A : Optional[Any] = 10
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :Optional[int] = TFOPTModelTester(self )
lowercase :Tuple = ConfigTester(self , config_class=snake_case__ )
def __snake_case ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase , lowercase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
if hasattr(snake_case__ , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(snake_case__ , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowercase :Any = model_class(config=snake_case__ )
lowercase :Optional[Any] = _get_word_embedding_weight(snake_case__ , model.get_input_embeddings() )
lowercase :Optional[Any] = _get_word_embedding_weight(snake_case__ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(snake_case__ )
lowercase :List[str] = _get_word_embedding_weight(snake_case__ , model.get_input_embeddings() )
lowercase :List[str] = _get_word_embedding_weight(snake_case__ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowercase :Dict = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , snake_case__ )
# check that weights remain the same after resizing
lowercase :Optional[int] = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowercase :Tuple = False
self.assertTrue(snake_case__ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , snake_case__ )
lowercase :Any = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowercase :Union[str, Any] = False
self.assertTrue(snake_case__ )
def lowerCamelCase (a_ :List[Any]) -> List[Any]:
return tf.constant(a_ , dtype=tf.intaa)
@require_tf
class __magic_name__ ( unittest.TestCase ):
__A : List[str] = 99
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :Optional[int] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowercase :Union[str, Any] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowercase :List[Any] = input_ids.shape[0]
lowercase :Any = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __magic_name__ ( unittest.TestCase ):
@slow
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :Optional[Any] = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
lowercase :int = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowercase :List[Any] = tf.not_equal(snake_case__ , model.config.pad_token_id )
with tf.GradientTape():
lowercase :List[str] = model(input_ids=snake_case__ , attention_mask=snake_case__ ).last_hidden_state
lowercase :Any = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , snake_case__ )
lowercase :str = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case__ , atol=4e-3 ) )
lowercase :Optional[int] = tf.function(snake_case__ , jit_compile=snake_case__ )
lowercase :List[str] = xla_generate(snake_case__ , snake_case__ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case__ , atol=4e-2 ) )
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
def __snake_case ( self : Dict ):
'''simple docstring'''
super().setUp()
lowercase :str = '''facebook/opt-350m'''
def __snake_case ( self : str ):
'''simple docstring'''
lowercase :str = TFOPTForCausalLM.from_pretrained(self.path_model )
lowercase :Optional[Any] = GPTaTokenizer.from_pretrained(self.path_model )
lowercase :int = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowercase :Tuple = tokenizer(snake_case__ , return_tensors='''tf''' , padding=snake_case__ , add_special_tokens=snake_case__ )
lowercase :str = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowercase :Dict = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-4 ) )
lowercase :int = tf.function(snake_case__ , jit_compile=snake_case__ )
lowercase :Optional[int] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-4 ) )
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
@property
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :Any = '''facebook/opt-125m'''
lowercase :Tuple = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
lowercase :Tuple = []
lowercase :Dict = GPTaTokenizer.from_pretrained(snake_case__ )
lowercase :Dict = TFOPTForCausalLM.from_pretrained(snake_case__ )
for prompt in self.prompts:
lowercase :Any = tokenizer(snake_case__ , return_tensors='''tf''' ).input_ids
lowercase :Optional[int] = model.generate(snake_case__ , max_length=1_0 )
lowercase :Dict = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
predicted_outputs += generated_string
self.assertListEqual(snake_case__ , snake_case__ )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :Optional[int] = '''facebook/opt-350m'''
lowercase :Dict = GPTaTokenizer.from_pretrained(snake_case__ )
lowercase :int = TFOPTForCausalLM.from_pretrained(snake_case__ )
lowercase :int = '''left'''
# use different length sentences to test batching
lowercase :Optional[int] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
lowercase :Optional[int] = tokenizer(snake_case__ , return_tensors='''tf''' , padding=snake_case__ )
lowercase :Any = inputs['''input_ids''']
lowercase :List[Any] = model.generate(input_ids=snake_case__ , attention_mask=inputs['''attention_mask'''] )
lowercase :List[str] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
lowercase :Tuple = model.generate(input_ids=snake_case__ )
lowercase :Union[str, Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
lowercase :Tuple = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
lowercase :Dict = model.generate(input_ids=snake_case__ , max_length=model.config.max_length - num_paddings )
lowercase :Optional[int] = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
lowercase :Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ )
lowercase :List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ )
lowercase :Union[str, Any] = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] )
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :List[str] = '''facebook/opt-350m'''
lowercase :str = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
lowercase :str = []
lowercase :Tuple = GPTaTokenizer.from_pretrained(snake_case__ )
lowercase :Any = TFOPTForCausalLM.from_pretrained(snake_case__ )
for prompt in self.prompts:
lowercase :Optional[int] = tokenizer(snake_case__ , return_tensors='''tf''' ).input_ids
lowercase :Tuple = model.generate(snake_case__ , max_length=1_0 )
lowercase :Tuple = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
predicted_outputs += generated_string
self.assertListEqual(snake_case__ , snake_case__ )
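
# Why `tokenizer.padding_side = "left"` in `test_batch_generation` above: a
# decoder-only model continues from the last position of the input, so pad
# tokens must go on the left or the model would be asked to continue from
# padding. Sketch of the batched layout (illustrative):
#
#     right padding: [Today, I, <pad>, <pad>]  -> model continues after <pad>  (wrong)
#     left  padding: [<pad>, <pad>, Today, I]  -> model continues after "I"    (right)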
"""simple docstring"""
def lowerCamelCase (a_ :int) -> str:
if number > 0:
raise ValueError('''input must be a negative integer''')
lowercase :Any = len(bin(a_)[3:])
lowercase :Any = bin(abs(a_) - (1 << binary_number_length))[3:]
lowercase :Tuple = (
(
'''1'''
+ '''0''' * (binary_number_length - len(a_))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
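
if __name__ == "__main__":
    # Worked example (illustrative): -5 needs 3 magnitude bits
    # (bin(-5)[3:] == "101"), and "1011" is the 4-bit two's complement of 5.
    assert twos_complement(-5) == "0b1011"
    assert twos_complement(0) == "0b0"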
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to string."""
    return ".".join(str(v) for v in version_tuple)
def reverse_words(input_str: str) -> str:
    """Reverse the order of the words in the input string."""
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
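
if __name__ == "__main__":
    # Quick demonstration (illustrative): split() also collapses repeated
    # whitespace, so the reversed string is normalised to single spaces.
    assert reverse_words("I love Python") == "Python love I"
    assert reverse_words("  hello   world  ") == "world hello"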
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using
    "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location of where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
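
# Launch sketch (illustrative, assuming the script is saved as `tracking.py`):
# `accelerate launch` picks up the hardware setup from `accelerate config`,
# and the flags below are the ones defined in `main()` above.
#
#     accelerate launch tracking.py --with_tracking --project_dir logs
#     accelerate launch tracking.py --mixed_precision fp16 --with_tracking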
"""simple docstring"""
from math import sqrt
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(__SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__( __SCREAMING_SNAKE_CASE : int = 1_00_01 ):
lowercase_ : str = 0
lowercase_ : Optional[Any] = 1
while count != nth and number < 3:
number += 1
if is_prime(__SCREAMING_SNAKE_CASE ):
count += 1
while count != nth:
number += 2
if is_prime(__SCREAMING_SNAKE_CASE ):
count += 1
return number
if __name__ == "__main__":
print(F"{solution() = }")
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
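
# Alignment semantics in brief (illustrative): for stage_names ["a", "b", "c"],
# out_indices [-3, -1] selects the same stages as out_features ["a", "c"],
# since negative indices count from the end:
#
#     stage_names = ["a", "b", "c"]
#     [stage_names[i] for i in [-3, -1]]  # -> ["a", "c"]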
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
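
# How the lazy pattern behaves (illustrative): importing this package only
# builds `_import_structure`; a submodule is loaded on first attribute access,
# so for example
#
#     from transformers.models.mobilebert import MobileBertConfig
#
# imports `configuration_mobilebert` at that point, and torch-only symbols
# fail lazily (not at package import time) when torch is missing.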
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever of the four quantities is given as 0."""
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
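
if __name__ == "__main__":
    # Worked example (illustrative): two 1 C charges 1 m apart experience
    # |F| = k * |q1 * q2| / r^2 = 8.988e9 N.
    assert coulombs_law(0, 1, 1, 1) == {"force": 8.988e9}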
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
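
# Usage sketch (illustrative): the inverse scheduler walks *forward* from a
# clean latent toward noise, which is the core of DDIM inversion for editing
# pipelines. The `unet` below is a user-provided noise-prediction model.
#
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         noise_pred = unet(latents, t).sample
#         latents = scheduler.step(noise_pred, t, latents).prev_sample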
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
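
# Usage sketch (illustrative): one `DPRConfig` is shared by the context
# encoder, question encoder and reader; `projection_dim=0` keeps the encoder
# output at `hidden_size` instead of adding a projection layer.
#
#     config = DPRConfig(projection_dim=128)
#     # A DPR encoder built from this config would project pooled outputs to 128-d.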
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def _a ( a :List[str] , a :Tuple=False ) -> List[Any]:
a = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def _a ( a :List[str] , a :int , a :Tuple=False ) -> Any:
for i in range(config.num_hidden_layers ):
if base_model:
a = ''''''
else:
a = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
a = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
a = in_proj_weight[
: config.hidden_size, :
]
a = in_proj_bias[: config.hidden_size]
a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a = in_proj_weight[
-config.hidden_size :, :
]
a = in_proj_bias[-config.hidden_size :]
def _a ( a :List[Any] ) -> Dict:
a = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a , a )
def _a ( a :Union[str, Any] , a :str , a :List[Any] ) -> str:
a = dct.pop(a )
a = val
def _a ( ) -> Optional[Any]:
a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a = Image.open(requests.get(a , stream=a ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the DINO checkpoint's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )

    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
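# ---------------------------------------------------------------------------
# Illustrative invocation (added; the script filename and output path below
# are hypothetical examples, not from the original file):
#
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16
#
# The converted checkpoint can afterwards be loaded with, e.g.
#   ViTModel.from_pretrained("./dino_vitb16", add_pooling_layer=False)
# ---------------------------------------------------------------------------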
| 117
| 1
|
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        """
        Arguments:
            id_ - input an id to identify the vertex
        Attributes:
            neighbors - a list of the vertices it is linked to
            edges - a dict of the edge weights, keyed by the neighbor's id
        """
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        """Comparison rule to < operator."""
        return self.key < other.key

    def __repr__(self):
        """Return the vertex id."""
        return self.id

    def add_neighbor(self, vertex):
        """Add a pointer to a vertex at neighbor's list."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Destination vertex and weight."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's Algorithm.

    Runs in O(mn) with `m` edges and `n` vertices.
    Returns a list with the edges of a Minimum Spanning Tree.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's Algorithm with a min heap.

    Runs in O((m + n)log n) with `m` edges and `n` vertices.
    Yields the edges of a Minimum Spanning Tree.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """
    Minimal doctest (reconstructed) exercising `prim` and `prim_heap`.
    >>> x = 3
    >>> G = [Vertex(n) for n in range(x)]
    >>> connect(G, 1, 2, 5)
    >>> connect(G, 2, 3, 1)
    >>> prim(G, G[0])
    [(2, 1), (3, 2)]
    >>> list(prim_heap(G, G[0]))
    [(2, 1), (3, 2)]
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 718
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 85
| 0
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Implements the sigmoid function, applied element-wise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
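# A small companion sketch (added, not part of the original module): the
# sigmoid derivative can be computed from the function value itself,
# s'(x) = s(x) * (1 - s(x)), which is why the forward output is usually
# cached during backpropagation.
def sigmoid_derivative(vector: np.ndarray) -> np.ndarray:
    s = sigmoid(vector)
    return s * (1 - s)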
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """
    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence
            tail[length] = v[i]
            length += 1
        else:
            # v[i] will replace the ceiling element in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
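# Worked trace (added) for [2, 5, 3, 7, 11, 8, 10, 13, 6] -- `tail` holds the
# smallest possible tail of an increasing subsequence of each length:
#   [2] -> [2, 5] -> [2, 3] -> [2, 3, 7] -> [2, 3, 7, 11] -> [2, 3, 7, 8]
#   -> [2, 3, 7, 8, 10] -> [2, 3, 7, 8, 10, 13] -> [2, 3, 6, 8, 10, 13]
# giving length 6 (one longest increasing subsequence is 2, 3, 7, 8, 10, 13).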
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
| 1
|
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models.

    The CNN/Daily News stories are stored in separate files; the summary
    appears at the end of the story as sentences prefixed by the special
    `@highlight` line.
    """

    def __init__(self, path="", prefix="train"):
        """We initialize the class by listing all the documents to summarize.
        Files are not read in memory due to the size of some datasets (like CNN/DailyMail).
        """
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Extract the story and summary from a story file.

    Arguments:
        raw_story (str): content of the story file as an utf-8 encoded string.
    """
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Adapt the source and target sequences' lengths to the block size.
    If the sequence is shorter we append padding tokens to the right of the sequence.
    """
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the mask. The attention mechanism will only attend to positions
    with value 1."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines and flatten them into a single
    sequence of token ids each."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternating {0, 1} segment embeddings: the segment id flips every time
    the separator token is encountered."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
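# Illustrative usage (added; the raw story string below is a made-up example,
# not from the CNN/DailyMail corpus): article lines come first, then
# "@highlight"-prefixed summary lines.
if __name__ == "__main__":
    raw = "First sentence of the article\nSecond sentence\n@highlight\nThe summary"
    story_lines, summary_lines = process_story(raw)
    print(story_lines)    # ['First sentence of the article.', 'Second sentence.']
    print(summary_lines)  # ['The summary.']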
| 475
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
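# Illustrative invocation (added; the script filename, checkpoint path and
# output directory are hypothetical examples):
#
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned
#
# The resulting folder can then be loaded with
#   MBartForConditionalGeneration.from_pretrained("./mbart-converted")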
| 475
| 1
|
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of the first n terms of an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
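# Illustrative call (added; "my-agent" is a hypothetical agent name):
#   template = download_prompt(None, agent_name="my-agent", mode="run")
# fetches and returns the default run prompt template from the Hub, while a
# string containing whitespace is treated as a literal prompt and returned
# unchanged.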
| 449
| 0
|
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    # returns x unchanged if it is already iterable, else duplicates it into a 2-tuple
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any]=None , **__UpperCamelCase : Optional[int] )->Union[str, Any]:
_UpperCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def lowercase__ ( self : str , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : List[str]=None , **__UpperCamelCase : str )->Tuple:
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[Any] )->Tuple:
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
_UpperCAmelCase = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
_UpperCAmelCase = after_output[0]
_UpperCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1e-3 )
def lowercase__ ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : List[str]=None , **__UpperCamelCase : int )->Optional[Any]:
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
_UpperCAmelCase = model(
input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , output_attentions=__UpperCamelCase )
_UpperCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(__UpperCamelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = to_atuple(vision_model.config.image_size )
_UpperCAmelCase = to_atuple(vision_model.config.patch_size )
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCAmelCase = output.text_model_output.attentions
self.assertEqual(len(__UpperCamelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase__ ( self : int , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : List[str] )->Any:
pt_model.to(__UpperCamelCase )
pt_model.eval()
# prepare inputs
_UpperCAmelCase = inputs_dict
_UpperCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_UpperCAmelCase = pt_model(**__UpperCamelCase ).to_tuple()
_UpperCAmelCase = fx_model(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
_UpperCAmelCase = fx_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = VisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_flax=__UpperCamelCase )
pt_model_loaded.to(__UpperCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
_UpperCAmelCase = pt_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output_loaded.numpy() , 4e-2 )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any )->List[Any]:
_UpperCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = VisionTextDualEncoderModel(__UpperCamelCase )
_UpperCAmelCase = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
_UpperCAmelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCamelCase )
_UpperCAmelCase = fx_state
self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] )->Union[str, Any]:
_UpperCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = VisionTextDualEncoderModel(__UpperCamelCase )
_UpperCAmelCase = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
_UpperCAmelCase = load_flax_weights_in_pytorch_model(__UpperCamelCase , fx_model.params )
self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : List[Any] )->Optional[Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__UpperCamelCase )
def lowercase__ ( self : Optional[Any] )->List[Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__UpperCamelCase )
def lowercase__ ( self : int )->List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**__UpperCamelCase )
def lowercase__ ( self : Dict )->Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__UpperCamelCase )
@is_pt_flax_cross_test
def lowercase__ ( self : List[str] )->Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = config_inputs_dict.pop('''vision_config''' )
_UpperCAmelCase = config_inputs_dict.pop('''text_config''' )
_UpperCAmelCase = config_inputs_dict
self.check_equivalence_pt_to_flax(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.check_equivalence_flax_to_pt(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@slow
def lowercase__ ( self : List[str] )->List[Any]:
_UpperCAmelCase , _UpperCAmelCase = self.get_pretrained_model_and_inputs()
_UpperCAmelCase = model_a(**__UpperCamelCase )
_UpperCAmelCase = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = model_a(**__UpperCamelCase )
_UpperCAmelCase = after_outputs[0]
_UpperCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1e-5 )
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
def lowercase__ ( self : Tuple )->List[Any]:
_UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , )
_UpperCAmelCase = 1_3
_UpperCAmelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_UpperCAmelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_UpperCAmelCase = random_attention_mask([batch_size, 4] )
_UpperCAmelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase__ ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Any )->List[Any]:
_UpperCAmelCase = FlaxViTModel(__UpperCamelCase )
_UpperCAmelCase = FlaxBertModel(__UpperCamelCase )
return vision_model, text_model
def lowercase__ ( self : List[str] )->List[Any]:
_UpperCAmelCase = FlaxViTModelTester(self )
_UpperCAmelCase = FlaxBertModelTester(self )
_UpperCAmelCase = vit_model_tester.prepare_config_and_inputs()
_UpperCAmelCase = bert_model_tester.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase = vision_config_and_inputs
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
def lowercase__ ( self : Any )->Optional[Any]:
_UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , )
_UpperCAmelCase = 1_3
_UpperCAmelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_UpperCAmelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_UpperCAmelCase = random_attention_mask([batch_size, 4] )
_UpperCAmelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase__ ( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : List[Any] )->Optional[int]:
_UpperCAmelCase = FlaxCLIPVisionModel(__UpperCamelCase )
_UpperCAmelCase = FlaxBertModel(__UpperCamelCase )
return vision_model, text_model
def lowercase__ ( self : Optional[Any] )->int:
_UpperCAmelCase = FlaxCLIPVisionModelTester(self )
_UpperCAmelCase = FlaxBertModelTester(self )
_UpperCAmelCase = clip_model_tester.prepare_config_and_inputs()
_UpperCAmelCase = bert_model_tester.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase = vision_config_and_inputs
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
def lowercase__ ( self : Any )->List[Any]:
_UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_UpperCAmelCase = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__UpperCamelCase , padding=__UpperCamelCase , return_tensors='''np''' )
_UpperCAmelCase = model(**__UpperCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_UpperCAmelCase = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __UpperCamelCase , atol=1e-3 ) )
| 718
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[Any] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
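# Usage sketch (added; assumes the surrounding library exposes BitConfig and
# BitModel as in the transformers package this file mirrors):
#   from transformers import BitConfig, BitModel
#   config = BitConfig(layer_type="bottleneck", global_padding="SAME")
#   model = BitModel(config)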
| 95
| 0
|
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow.
    `vectors` should be an n*k 2-D NumPy array, where n is the number
    of vectors of dimensionality k.
    `noofclusters` should be an integer.
    Note: this is written against the legacy TF1 graph API.
    """

    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        # tf.subtract replaces the pre-1.0 tf.sub used in older versions
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i] for i in range(len(vectors)) if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
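# Minimal usage sketch (added; not part of the original file). Requires the
# TF1 graph API: under TF2 use `import tensorflow.compat.v1 as tf` together
# with `tf.disable_v2_behavior()` before calling the function.
if __name__ == "__main__":
    import numpy as np

    points = np.vstack([np.random.randn(20, 2) + [4, 4], np.random.randn(20, 2) + [-4, -4]])
    centroids, assignments = TFKMeansCluster(points, 2)
    print(centroids)
    print(assignments)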
| 426
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 426
| 1
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 709
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
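# Note (added): both heuristics are admissible on this 4-connected grid with
# unit move costs, since neither euclidean (HEURISTIC = 0) nor manhattan
# (HEURISTIC = 1) distance can overestimate the true remaining distance, so
# A* is guaranteed to return a shortest path here.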
| 216
| 0
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
cluster = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
cluster = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 130
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
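
# --- A minimal greedy-decoding sketch mirroring the integration test above.
# Kept as a comment because it downloads the "openai-gpt" checkpoint; the
# tokenizer class shown is the standard transformers one, used here as an
# illustrative assumption rather than part of the original test file.
# import torch
# from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#
# tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
# model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
# input_ids = tokenizer("the president is", return_tensors="pt").input_ids
# output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
# print(tokenizer.decode(output_ids[0]))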
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
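
# --- Sketch of what the lazy `_import_structure` above buys (an illustrative
# assumption, not part of the original file): importing the package namespace
# is cheap, and the heavy torch/TF submodules are only imported on the first
# attribute access that the `_LazyModule` intercepts.
# from transformers.models import mobilebert
#
# config = mobilebert.MobileBertConfig()  # first access triggers the real import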
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` terms."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
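# Example output of multiplication_table(number=5, number_of_terms=3):
# 5 * 1 = 5
# 5 * 2 = 10
# 5 * 3 = 15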
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
A_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", F"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", F"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qpos_proj.weight", F"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kpos_proj.weight", F"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.weight", F"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", F"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", F"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kpos_proj.weight", F"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.weight", F"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", F"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", F"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", F"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_qpos_proj.bias", F"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_kpos_proj.bias", F"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.bias", F"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", F"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", F"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_kpos_proj.bias", F"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.bias", F"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", F"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
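
# --- Worked illustration of the in_proj splitting performed by read_in_q_k_v above:
# PyTorch's nn.MultiheadAttention packs the q/k/v projections into one (3*d, d)
# weight plus a (3*d,) bias; slicing rows [:d], [d:2d] and [-d:] recovers the
# separate q, k and v projections (d=256 matches the slices used above).
_d = 256
_in_proj_weight = torch.randn(3 * _d, _d)
_q_w, _k_w, _v_w = _in_proj_weight[:_d, :], _in_proj_weight[_d : 2 * _d, :], _in_proj_weight[-_d:, :]
assert _q_w.shape == _k_w.shape == _v_w.shape == (_d, _d)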
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation with the variance-exploding (VE) SDE sampler.
    """

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
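
# --- Minimal usage sketch for the pipeline above, kept as a comment because it
# downloads a checkpoint; the "google/ncsnpp-celebahq-256" name is an assumption
# for illustration.
# from diffusers import ScoreSdeVePipeline
#
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
# image = pipe(num_inference_steps=2000).images[0]
# image.save("sde_ve_sample.png")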
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
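
# --- Quick numeric illustration of why the saturation assertions above hold:
# silu(x) = x * sigmoid(x), and sigmoid(-100) underflows to exactly 0 in float32,
# while silu(20) is numerically indistinguishable from 20.
x = torch.tensor([-100.0, -1.0, 0.0, 20.0])
print(torch.nn.functional.silu(x))  # ~ tensor([ 0.0000, -0.2689,  0.0000, 20.0000])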
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
"""simple docstring"""
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
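
# --- Worked example of the ViT sequence-length arithmetic used in
# check_vision_text_output_attention above: a 224x224 image with 16x16 patches
# yields (224 // 16) * (224 // 16) = 196 patches, plus one [CLS] token.
_image_size, _patch_size = (224, 224), (16, 16)
_num_patches = (_image_size[1] // _patch_size[1]) * (_image_size[0] // _patch_size[0])
assert (_num_patches, _num_patches + 1) == (196, 197)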
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)
    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """
        Sequences that are too long are split by chunk of max_model_input_size.
        """
        max_len = self.params.max_model_input_size
        too_long = self.lengths > max_len
        logger.info(f"Splitting {sum(too_long)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """
        Too short sequences are simply removed. This could be tuned.
        """
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        """
        Remove sequences with a (too) high level of unknown tokens.
        """
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
if not self.params.is_master:
return
logger.info(F"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """
        Do the padding and transform into torch.tensor.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
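
# --- Standalone illustration of the padding performed by batch_sequences above
# (pad id 0 is an arbitrary assumption for this sketch):
_seqs = [[5, 6, 7], [5, 6]]
_max_len = max(len(s) for s in _seqs)
_padded = [s + [0] * (_max_len - len(s)) for s in _seqs]
assert torch.tensor(_padded).shape == (2, 3)  # tensor([[5, 6, 7], [5, 6, 0]])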
"""simple docstring"""
from collections.abc import Sequence
def UpperCAmelCase ( a_ = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
lowerCamelCase : List[Any] = nums[0]
for i in range(1, len(a_ ) ):
lowerCamelCase : Tuple = nums[i]
lowerCamelCase : List[Any] = max(a_, ans + num, a_ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_A = int(input('Enter number of elements : ').strip())
_A = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
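# Worked trace of the recurrence ans = max(ans, ans + num, num) on [3, -2, 5]:
# start ans = 3; num = -2 -> max(3, 1, -2) = 3; num = 5 -> max(3, 8, 5) = 8,
# i.e. the best subsequence is (3, 5): elements need not be contiguous.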
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCamelCase : Optional[Any] = {'input_ids': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
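

# Illustrative sketch (not part of the test suite): the public-API path that the
# translation test above exercises. The checkpoint id comes from the test
# constants; running this downloads the tokenizer.
#
#     from transformers import MBart50TokenizerFast
#
#     tok = MBart50TokenizerFast.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ar_AR"
#     )
#     enc = tok("A test", return_tensors="pt")
#     # input_ids start with the en_XX code (250004) and end with </s> (2);
#     # tok.lang_code_to_id["ar_AR"] (250001) is what generation receives as
#     # forced_bos_token_id.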
| 133
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
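

# Example (illustrative, not part of the module): configuring a 16-frame video
# model while keeping the remaining defaults.
#
#     config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
#     config.num_frames   # 16
#     config.hidden_size  # 768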
| 291
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    """Cosine similarity between every image embedding and every concept embedding."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
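

# Minimal illustration of the score computation above (shapes are assumptions):
#
#     import torch
#     image_embeds = torch.randn(2, 768)     # two projected image embeddings
#     concept_embeds = torch.randn(17, 768)  # one row per filtered concept
#     scores = cosine_distance(image_embeds, concept_embeds)  # shape (2, 17)
#
# An image is flagged when any per-concept cosine score exceeds its threshold
# (plus the 0.01 "special care" adjustment).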
| 291
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    # the converted model must keep the same total parameter sum as the original
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
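
# Example invocation (illustrative; the script file name and all paths are
# placeholders):
#
#     python convert_flava_original_pytorch_to_hf.py \
#         --checkpoint_path ./flava_full.pt \
#         --codebook_path ./flava_codebook.pt \
#         --pytorch_dump_folder_path ./flava-hf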
| 703
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
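

# Illustrative sketch of the two extraction paths tested above, via the public
# SpeechT5FeatureExtractor API (a 16 kHz float32 waveform is assumed):
#
#     import numpy as np
#     from transformers import SpeechT5FeatureExtractor
#
#     fe = SpeechT5FeatureExtractor()
#     wav = np.zeros(16000, dtype=np.float32)  # one second of silence
#     waveform_inputs = fe(wav, sampling_rate=16000, return_tensors="np")
#     mel_targets = fe(audio_target=wav, sampling_rate=16000, return_tensors="np")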
| 159
| 0
|
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : int = 10_00 ) -> int:
_snake_case = 3
_snake_case = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 224
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
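

# Tiny illustration of the column trimming above (pad id 0 is an assumption):
#
#     ids = torch.tensor([[5, 6, 0], [7, 0, 0]])
#     trim_batch(ids, pad_token_id=0)  # -> tensor([[5, 6], [7, 0]])

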
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info() -> dict:
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
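

# Worked example of the token-overlap F1 above (normalize_answer lowercases and
# strips punctuation and articles before counting shared tokens):
#
#     f1_score("the cat sat on the mat", "a cat sat")
#     # precision = 2/4, recall = 2/2  ->  F1 = 2/3
#
#     calculate_exact_match(["Paris."], ["paris"])  # -> {"em": 1.0}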
| 224
| 1
|
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
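

# Minimal sketch of the denoising-loop pattern exercised above (the zero
# "model output" is a stand-in for a real UNet prediction):
#
#     import torch
#     from diffusers import KDPM2DiscreteScheduler
#
#     scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_schedule="linear")
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         scaled = scheduler.scale_model_input(sample, t)
#         model_output = torch.zeros_like(scaled)  # stand-in for model(scaled, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample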
| 83
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 1
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h

if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only"""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        # free GPU memory between tests to avoid unexpected behaviors
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        config = BitsAndBytesConfig()
        config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries casting to float
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries casting to half
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)

@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowerCamelCase (unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 't5-small'
__UpperCamelCase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__UpperCamelCase = AutoTokenizer.from_pretrained(cls.model_name )
__UpperCamelCase = 'Translate in German: Hello, my dog is cute'
def snake_case_ ( self: Dict ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self: List[str] ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__UpperCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules
__UpperCamelCase = None
# test with `t5-small`
__UpperCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name,load_in_abit=A_,device_map='auto' )
__UpperCamelCase = self.tokenizer(self.input_text,return_tensors='pt' ).to(0 )
__UpperCamelCase = model.generate(**A_ )
# test with `flan-t5-small`
__UpperCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name,load_in_abit=A_,device_map='auto' )
__UpperCamelCase = self.tokenizer(self.input_text,return_tensors='pt' ).to(0 )
__UpperCamelCase = model.generate(**A_ )
__UpperCamelCase = modules
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__UpperCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name,load_in_abit=A_,device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q,bnb.nn.Linearabit ) )
__UpperCamelCase = self.tokenizer(self.input_text,return_tensors='pt' ).to(0 )
__UpperCamelCase = model.generate(**A_ )
# test with `flan-t5-small`
__UpperCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name,load_in_abit=A_,device_map='auto' )
__UpperCamelCase = self.tokenizer(self.input_text,return_tensors='pt' ).to(0 )
__UpperCamelCase = model.generate(**A_ )
class Classes4BitModelTest(_a):  # `_a` is the base 4-bit test fixture defined earlier in this file
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        r"""Quantized base weights should be `Params4bit`; task heads should stay `nn.Parameter`."""
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(_a):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(_a):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
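
# The training test below exercises a `LoRALayer` wrapper that is defined elsewhere in
# the original test file. A minimal sketch of such a wrapper is shown here for reference
# (an assumption reconstructed from how `module.adapter[1].weight` is used below, not
# necessarily the exact upstream implementation):
class LoRALayer(nn.Module):
    """Wraps a frozen linear layer with a trainable two-matrix low-rank adapter."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        nn.init.zeros_(self.adapter[1].weight)  # the adapter starts as a no-op residual

    def forward(self, input, *args, **kwargs):
        # frozen base output plus the low-rank adapter's contribution
        return self.module(input, *args, **kwargs) + self.adapter(input)
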
class Bnb4BitTestTraining(_a):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(_a):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Creates train and validation `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of a training script tracking model performance.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, training throws an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
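
# Example launch (hypothetical script name and flags, shown for illustration only):
#     accelerate launch test_performance.py --model_name_or_path bert-base-cased --output_dir ./results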
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class lowerCamelCase :
'''simple docstring'''
a = PegasusConfig
a = {}
a = "gelu"
def __init__( self : Dict , _snake_case : Any , _snake_case : int=13 , _snake_case : str=7 , _snake_case : int=True , _snake_case : Tuple=False , _snake_case : int=99 , _snake_case : Any=32 , _snake_case : int=2 , _snake_case : Optional[int]=4 , _snake_case : int=37 , _snake_case : Optional[Any]=0.1 , _snake_case : str=0.1 , _snake_case : List[Any]=40 , _snake_case : Any=2 , _snake_case : int=1 , _snake_case : int=0 , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
"""simple docstring"""
import math
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ) -> float:
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__UpperCAmelCase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
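
    # Quick sanity check (hypothetical values): at 60 degrees the transmitted
    # intensity is cos^2(60°) = 1/4 of the input, so 100.0 -> ~25.0
    # (up to floating-point rounding).
    print(malus_law(100.0, 60))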
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
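
    # Worked example (hypothetical cash flows): the first flow is discounted by
    # (1 + rate) ** 0 = 1, so [10.0, 10.0] at a 10% rate gives 10 + 10 / 1.1 = 19.09.
    print(net_present_value(0.1, [10.0, 10.0]))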
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Returns the index of the first Fibonacci number with `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[torch.Generator] = None, eta: float = 0.0, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
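
# Example usage (a sketch, not part of the pipeline itself; assumes an unconditional
# latent-diffusion checkpoint such as "CompVis/ldm-celebahq-256" is available):
#
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]
#     image.save("sample.png")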
def proper_divisor_sum(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
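
    # Quick check: 28 is a perfect number, so its proper divisors
    # 1 + 2 + 4 + 7 + 14 sum back to 28.
    print(proper_divisor_sum(28))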
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements to identify in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
from __future__ import annotations


def has_unique_chars(text: str) -> bool:
    return len(set(text)) == len(text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
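
    # "uncopyrightable" has no repeated letters, so this prints True;
    # "hello" repeats "l", so it prints False.
    print(has_unique_chars("uncopyrightable"))
    print(has_unique_chars("hello"))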
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """Test case for verifying the `accelerate launch` CLI works correctly."""

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """Test case for verifying that `accelerate tpu-config` builds the right `gcloud` command."""

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
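
# Example invocation (hypothetical local paths shown for illustration):
#     python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bigbird/model.ckpt \
#         --big_bird_config_file ./bigbird/config.json \
#         --pytorch_dump_path ./bigbird-pytorch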
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
import requests
monkeypatch.setattr(SCREAMING_SNAKE_CASE__ , """request""" , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = URL
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = url
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = [url]
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = {"""train""": url}
lowercase : Union[str, Any] = """dummy"""
lowercase : List[Any] = """downloads"""
lowercase : Optional[Any] = tmp_path
lowercase : Dict = DownloadConfig(
cache_dir=os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , use_etag=SCREAMING_SNAKE_CASE__ , )
lowercase : Union[str, Any] = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE__ , download_config=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = dl_manager.download(SCREAMING_SNAKE_CASE__ )
lowercase : str = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = [downloaded_paths]
lowercase : Any = [urls]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert "train" in downloaded_paths.keys()
lowercase : Tuple = downloaded_paths.values()
lowercase : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowercase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__ )
lowercase : str = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowercase : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
lowercase : Tuple = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
lowercase : Dict = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
lowercase : Union[str, Any] = str(SCREAMING_SNAKE_CASE__ )
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = filename
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = [filename]
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = {"""train""": filename}
lowercase : int = """dummy"""
lowercase : List[Any] = xz_file.parent
lowercase : Union[str, Any] = """extracted"""
lowercase : List[str] = DownloadConfig(
cache_dir=SCREAMING_SNAKE_CASE__ , use_etag=SCREAMING_SNAKE_CASE__ , )
lowercase : List[Any] = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE__ , download_config=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = dl_manager.extract(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = [extracted_paths]
lowercase : Optional[Any] = [paths]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert "train" in extracted_paths.keys()
lowercase : Optional[Any] = extracted_paths.values()
lowercase : Optional[int] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowercase : List[Any] = Path(SCREAMING_SNAKE_CASE__ )
lowercase : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(SCREAMING_SNAKE_CASE__ , etag=SCREAMING_SNAKE_CASE__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowercase : Optional[int] = extracted_path.read_text()
lowercase : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
lowercase_ = (720, 1280) # Height, Width
lowercase_ = (0.4, 0.6) # if height or width lower than this scale, drop it.
lowercase_ = 1 / 100
lowercase_ = ''
lowercase_ = ''
lowercase_ = ''
lowercase_ = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char):
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
"""simple docstring"""
lowercase_ = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowercase_ = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowercase_ = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
"""Convert Swin SimMIM checkpoints from the original repository to the transformers format."""
import argparse

import requests
import torch
from PIL import Image

from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor


def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
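# Illustration (added, not part of the original script): the original checkpoint
# stores query/key/value as one fused projection, e.g. a weight of shape
# (3 * dim, dim). The slices above split it back into equal thirds:
#   val[:dim, :]          -> query
#   val[dim : dim * 2, :] -> key
#   val[-dim:, :]         -> value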
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 588
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 153
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 719
|
def different_signs(num1, num2):
    """Return True if num1 and num2 have opposite signs."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
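    # Illustrative check (added, not in the original file): in two's complement,
    # the XOR of two integers has its sign bit set exactly when the operands'
    # sign bits differ, so num1 ^ num2 < 0 is equivalent to (num1 < 0) != (num2 < 0).
    for num1, num2 in [(1, -1), (-1, 1), (2, 3), (-2, -3), (0, -5)]:
        assert different_signs(num1, num2) == ((num1 < 0) != (num2 < 0))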
| 353
| 0
|
def excel_title_to_column(column_title):
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
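    # Quick sanity checks (added for illustration): a column title is a base-26
    # numeral with digits A=1 ... Z=26, so "AB" = 1 * 26 + 2 = 28.
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("Z") == 26
    assert excel_title_to_column("AB") == 28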
| 484
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mobilebert_fast'] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilebert'] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilebert'] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 484
| 1
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Step 3 - Flattening
    classifier.add(layers.Flatten())
    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))
    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )
    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")
    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid head outputs a probability in [0, 1], so threshold at 0.5
    # instead of comparing for exact equality with 0 or 1.
    if result[0][0] < 0.5:
        prediction = "Normal"
    else:
        prediction = "Abnormality detected"
| 325
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
| 325
| 1
|
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube, e.g. 27 = 3 ** 3."""
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
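    # Note (added): the float cube root can land slightly off an exact integer
    # (e.g. 27 ** (1 / 3) evaluates to about 3.0000000000000004 on most platforms),
    # which is why the root is rounded above before being re-cubed.
    print(perfect_cube(125))  # expected: True
    print(perfect_cube(100))  # expected: False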
| 171
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(F'git diff --name-only {fork_point_sha}'.split()).decode('utf-8').split()
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(RF'^({joined_dirs}).*?\.py$')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 171
| 1
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F"Job {i:>2} is {job[0]} at {job[1]}")
| 583
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")
            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_length = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )
        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        self.assertEqual(out_len + 2, len(outputs))
        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]),
            [self.model_tester.num_attention_heads, encoder_seq_length, dim],
        )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 583
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)
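# Illustration (added): rreplace("group_1.res_path.w.w", ".w", ".weight", 1)
# replaces only the last occurrence, giving "group_1.res_path.w.weight".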
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 224
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n    title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n    author = "Snover, Matthew  and\n      Dorr, Bonnie  and\n      Schwartz, Rich  and\n      Micciulla, Linnea  and\n      Makhoul, John",\n    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n    month = aug # " 8-12",\n    year = "2006",\n    address = "Cambridge, Massachusetts, USA",\n    publisher = "Association for Machine Translation in the Americas",\n    url = "https://aclanthology.org/2006.amta-papers.25",\n    pages = "223--231",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 224
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase = ["""input_features""", """attention_mask"""]
    def __init__( self , feature_size=80 , sampling_rate=16000 , num_mel_bins=80 , padding_value=0.0 , do_ceptral_normalize=True , normalize_means=True , normalize_vars=True , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features( self , waveform: np.ndarray , ):
        """Get mel-filter bank features using TorchAudio; Kaldi expects 16-bit signed integer input."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform ).unsqueeze(0 )
        features = ta_kaldi.fbank(waveform , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()
@staticmethod
    def utterance_cmvn( x: np.ndarray , input_length: int , normalize_means: Optional[bool] = True , normalize_vars: Optional[bool] = True , padding_value: float = 0.0 , ):
        # normalize over the un-padded frames only
        if normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self , input_features: List[np.ndarray] , attention_mask: Optional[np.ndarray] = None ):
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x , n , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(input_features , lengths )
        ]
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , padding: Union[bool, str, PaddingStrategy] = False , max_length: Optional[int] = None , truncation: bool = False , pad_to_multiple_of: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , sampling_rate: Optional[int] = None , return_attention_mask: Optional[bool] = None , **kwargs , ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )

        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )

        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform ) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'''input_features''': features} )

        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=True , **kwargs , )

        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''' )
        if isinstance(input_features[0] , list ):
            padded_inputs['''input_features'''] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]

        attention_mask = padded_inputs.get('''attention_mask''' )
        if attention_mask is not None:
            padded_inputs['''attention_mask'''] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['''input_features'''] = self.normalize(
                padded_inputs['''input_features'''] , attention_mask=attention_mask )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )

        return padded_inputs
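
# Standalone sketch of the utterance-level CMVN performed by `utterance_cmvn`
# above: per utterance, subtract the per-bin mean and divide by the per-bin
# standard deviation computed over the un-padded frames only. The epsilon guard
# against zero variance is an assumption of this sketch, not of the class above.
if __name__ == "__main__":
    _feats = np.random.randn(100, 80).astype(np.float32)  # 100 frames, 80 mel bins
    _normed = (_feats - _feats.mean(axis=0)) / np.maximum(_feats.std(axis=0), 1e-10)
    print(_normed.mean(axis=0).round(6).max(), _normed.std(axis=0).round(6).max())  # ~0.0, ~1.0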
| 705
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip( self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(torch_device )

        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )

        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )

        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
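
# Minimal standalone version of the integration check above (illustrative;
# requires torch and downloads weights when run): LiLT consumes token ids plus
# one normalized bounding box per token and returns RoBERTa-sized hidden states.
if __name__ == "__main__":
    model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
    input_ids = torch.tensor([[1, 2]])
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
    with torch.no_grad():
        outputs = model(input_ids=input_ids, bbox=bbox)
    print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768])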
| 502
| 0
|
"""simple docstring"""
from collections import defaultdict
def dfs(start):
    """Return the size of the subtree rooted at ``start``; record nodes whose subtree size is even."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Start the traversal from the root node 1."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
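
# Self-contained reference version of the computation above (a sketch for
# clarity, avoiding module-level state): every subtree of even size found during
# DFS marks one removable edge; the root's own entry is discounted because it
# has no parent edge to cut.
def count_even_cuts(edge_list):
    adjacency = defaultdict(list)
    for u, v in edge_list:
        adjacency[u].append(v)
        adjacency[v].append(u)
    removable, seen = [], set()

    def subtree_size(node):
        seen.add(node)
        size = 1
        for neighbour in adjacency[node]:
            if neighbour not in seen:
                size += subtree_size(neighbour)
        if size % 2 == 0:
            removable.append(node)
        return size

    subtree_size(1)
    return len(removable) - 1


assert count_even_cuts([(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]) == 2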
| 277
|
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''

SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
'''simple docstring'''
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        token = '''<s>'''
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
        self.assertEqual(len(vocab_keys ) , 1004 )
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = '''I was born in 92000, and this is falsé.'''

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )

        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )

        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )

        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ''' '''.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='''pt''' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=False )

        config = BigBirdConfig(attention_type='''original_full''' )
        model = BigBirdModel(config )

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
        decoded_text = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
        self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
    def test_tokenizer_integration(self):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 52
| 0
|
'''simple docstring'''
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Returns the minimum cost of a monotone (right/down) path from the top-left to
    the bottom-right corner of ``matrix``. Note: mutates ``matrix`` in place.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
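
# Variant sketch (an appended illustration, not part of the original module):
# the same DP with O(len(matrix[0])) extra space and no mutation of the
# caller's grid. Names here are illustrative.
def min_path_sum_rolling(matrix: list[list[int]]) -> int:
    row = list(matrix[0])
    for j in range(1, len(row)):
        row[j] += row[j - 1]
    for i in range(1, len(matrix)):
        row[0] += matrix[i][0]
        for j in range(1, len(row)):
            row[j] = matrix[i][j] + min(row[j], row[j - 1])
    return row[-1]


assert min_path_sum_rolling([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7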
| 709
|
'''simple docstring'''
def counting_sort(collection):
    """Sort a collection of integers in O(n + k) time, where k is the value range."""
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Sort the characters of a string with counting sort."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
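
    # Quick property checks (illustrative): counting sort is stable and runs in
    # O(n + k) time with O(k) extra space, where k = max - min + 1, so it only
    # pays off when the value range is small relative to n.
    assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
    assert counting_sort([-2, -5, -45]) == [-45, -5, -2]
    assert counting_sort([]) == []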
| 654
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Fixed-capacity circular queue backed by a doubly linked ring of nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        """Peek at the element at the front of the queue."""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
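
# Usage sketch for the queue above (illustrative): a capacity-3 ring exercised
# through enqueue/first/dequeue; the rear pointer only advances once the first
# slot has been filled.
if __name__ == "__main__":
    queue = CircularQueueLinkedList(3)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"
    assert queue.is_empty()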
| 46
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 5_12,
'albert-large-v1': 5_12,
'albert-xlarge-v1': 5_12,
'albert-xxlarge-v1': 5_12,
'albert-base-v2': 5_12,
'albert-large-v2': 5_12,
'albert-xlarge-v2': 5_12,
'albert-xxlarge-v2': 5_12,
}
SPIECE_UNDERLINE = '▁'


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """An ALBERT sequence has the format ``[CLS] A [SEP]`` (pair: ``[CLS] A [SEP] B [SEP]``)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """Token type ids are 0 over ``[CLS] A [SEP]`` and 1 over ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )

        return (out_vocab_file,)
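
# Layout implemented by the two helpers above (a comment-only illustration):
#   single sequence:  [CLS] A [SEP]              token_type_ids: 0 ... 0
#   sequence pair:    [CLS] A [SEP] B [SEP]      token_type_ids: 0 ... 0 1 ... 1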
| 584
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""bert_for_seq_generation""": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
def __init__(self , __a , __a="<s>" , __a="</s>" , __a="<unk>" , __a="<pad>" , __a="<::::>" , __a = None , **__a , ):
'''simple docstring'''
lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase = vocab_file
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self , d ):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize(self , text ):
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self , token ):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token(self , index ):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index )
        return token

    def convert_tokens_to_string(self , tokens ):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (out_vocab_file,)
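
# Background sketch (illustrative; "spiece.model" is a placeholder path): the
# tokenizer above is a thin wrapper over a SentencePiece model, so encoding and
# decoding ultimately reduce to calls like these.
#
#   import sentencepiece as spm
#   sp = spm.SentencePieceProcessor()
#   sp.Load("spiece.model")
#   pieces = sp.encode("Hello world", out_type=str)   # e.g. ["▁Hello", "▁world"]
#   text = sp.decode(pieces)                          # "Hello world"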
| 700
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCamelCase__ ( TestCase):
"""simple docstring"""
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self , save_dir ):
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(save_dir )

    def _setup_tf_ckpt(self , save_dir ):
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(save_dir )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "mock_framework"
# Framework provided - return whatever the user provides
lowerCamelCase = FeaturesManager.determine_framework(self.test_model , __a )
self.assertEqual(__a , __a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
lowerCamelCase = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
lowerCamelCase = FeaturesManager.determine_framework(__a , __a )
self.assertEqual(__a , __a )
def _a (self ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__a )
lowerCamelCase = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__a )
lowerCamelCase = FeaturesManager.determine_framework(__a )
self.assertEqual(__a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__a ):
lowerCamelCase = FeaturesManager.determine_framework(__a )
def _a (self ):
'''simple docstring'''
lowerCamelCase = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_tf_available" , __a ):
lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowerCamelCase = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_torch_available" , __a ):
lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_tf )
# Both in environment -> use PyTorch
lowerCamelCase = MagicMock(return_value=__a )
lowerCamelCase = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_tf_available" , __a ), patch(
"transformers.onnx.features.is_torch_available" , __a ):
lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__a , self.framework_pt )
# Both not in environment -> raise error
lowerCamelCase = MagicMock(return_value=__a )
lowerCamelCase = MagicMock(return_value=__a )
with patch("transformers.onnx.features.is_tf_available" , __a ), patch(
"transformers.onnx.features.is_torch_available" , __a ):
with self.assertRaises(__a ):
lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
| 484
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet(self):
torch.manual_seed(0 )
_A = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
    def dummy_unet_condition(self):
torch.manual_seed(0 )
_A = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
    def dummy_vqvae_and_unet(self):
torch.manual_seed(0 )
_A = AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_A = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
    def test_audio_diffusion(self):
_A = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_A = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_A = DDPMScheduler()
_A = AudioDiffusionPipeline(vqvae=__A , unet=self.dummy_unet , mel=__A , scheduler=__A )
_A = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_A = torch.Generator(device=__A ).manual_seed(42 )
_A = pipe(generator=__A , steps=4 )
_A = output.audios[0]
_A = output.images[0]
_A = torch.Generator(device=__A ).manual_seed(42 )
_A = pipe(generator=__A , steps=4 , return_dict=__A )
_A = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_A = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_A = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_A = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_A = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_A = DDIMScheduler()
_A = self.dummy_vqvae_and_unet
_A = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__A , scheduler=__A )
_A = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
np.random.seed(0 )
_A = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_A = torch.Generator(device=__A ).manual_seed(42 )
_A = pipe(raw_audio=__A , generator=__A , start_step=5 , steps=10 )
_A = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_A = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_A = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_A = self.dummy_unet_condition
_A = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__A , mel=__A , scheduler=__A )
_A = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
np.random.seed(0 )
_A = torch.rand((1, 1, 10) )
_A = pipe(generator=__A , encoding=__A )
_A = output.images[0]
_A = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_A = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion(self):
_A = torch_device
_A = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_A = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_A = torch.Generator(device=__A ).manual_seed(42 )
_A = pipe(generator=__A )
_A = output.audios[0]
_A = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_A = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_A = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 484
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers."""
    def __init__( self , *, # begin keyword-only arguments
        bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ):
        return self.indices == other.indices

    def __getitem__( self , idx ):
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word

    def __len__( self ):
        return len(self.symbols )

    def __contains__( self , sym ):
        return sym in self.indices
@classmethod
    def load( cls , f ):
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol( self , word , n=1 , overwrite=False ):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta( self , lines ):
        return 0
    def add_from_file( self , f ):
        if isinstance(f , str ):
            try:
                with open(f , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(f ) )
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines )

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(''' ''' , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(''' ''' , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        '''Duplicate word found when loading Dictionary: \'{}\'. '''
                        '''Duplicate words can overwrite earlier ones by adding the '''
                        '''#fairseq:overwrite flag at the end of the corresponding row '''
                        '''in the dictionary file. If using the Camembert model, please '''
                        '''download an updated copy of the model file.'''.format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' )
def rewrite_dict_keys(d):
    # (1) strip the word-break marker; (2) tag unbroken words with a word-end marker,
    # e.g. {'le@@': 5, 'er': 7} => {'le': 5, 'er</w>': 7}
    d2 = dict((re.sub(r'''@@$''' , '''''' , k ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , k ), v) for k, v in d.items() )
    keep_keys = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path , pytorch_dump_folder_path ):
    # prep
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f"""Writing results to {pytorch_dump_folder_path}""" )

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , '''checkpoint.pt''' )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
    chkpt = torch.load(checkpoint_file , map_location='''cpu''' )

    args = chkpt['''cfg''']['''model''']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , '''dict.txt''' )
    if not os.path.isfile(dict_file ):
        raise ValueError(f"""path to the file {dict_file} does not exist!""" )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''vocab_file'''] )
    print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
    with open(src_vocab_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , '''bpecodes''' )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )

    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''merges_file'''] )
    shutil.copyfile(bpecodes_file , merges_file )

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , '''config.json''' )
    model_conf = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1e-12,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
    # good hparam defaults to start with
    print(f"""Generating {biogpt_model_config_file}""" )
    with open(biogpt_model_config_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        '''bos_token''': '''<s>''',
        '''eos_token''': '''</s>''',
        '''model_max_length''': 1024,
        '''pad_token''': '''<pad>''',
        '''special_tokens_map_file''': None,
        '''tokenizer_class''': '''BioGptTokenizer''',
        '''unk_token''': '''<unk>''',
    }

    print(f"""Generating {biogpt_tokenizer_config_file}""" )
    with open(biogpt_tokenizer_config_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )

    # model
    model_state_dict = chkpt['''model''']

    # remove unneeded keys
    ignore_keys = [
        '''decoder.version''',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )

    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith('''output_projection.weight''' ):
            model_state_dict['''output_projection.weight'''] = model_state_dict.pop(layer_name )
        else:
            model_state_dict['''biogpt.''' + layer_name] = model_state_dict.pop(layer_name )

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )

    # check that it loads ok
    model_new.load_state_dict(model_state_dict )

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(f"""Generating {pytorch_weights_dump_path}""" )
    torch.save(model_state_dict , pytorch_weights_dump_path )

    print('''Conversion is done!''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
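
# Example invocation (the script name and paths are placeholders): the dump
# directory must contain checkpoint.pt, dict.txt and bpecodes, per the checks
# in the conversion function above.
#
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/biogpt_dump \
#       --pytorch_dump_folder_path /path/to/output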
| 484
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
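
# Design note: the `_LazyModule` indirection above defers the heavy torch
# imports until an attribute such as `TrOCRForCausalLM` is first accessed, so
# `import transformers` stays fast; the TYPE_CHECKING branch gives static
# analyzers the same names eagerly.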
| 525
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config , has_lm_head=False , is_semantic=False ):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
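# e.g. for i == 0 (and is_semantic=False) the loop above produces pairs such as
# ("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight").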
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate query/key/value parameters."""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    """Pop `old` from the dict and re-insert its value under `new`."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Fetch the standard COCO image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into the HF BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr",
            commit_message="Add image processor", use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr",
            commit_message="Add model", use_temp_dir=True,
        )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
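# Example invocation (a sketch: the URL is the parser default above, while the script
# name and output folder are illustrative):
#
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base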
| 525
| 1
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
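# To run only this suite from a transformers development checkout (path is illustrative):
#   python -m pytest tests/models/bartpho/test_tokenization_bartpho.py -q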
| 604
|
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes.")
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing, testing_file, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )
        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)
        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")
        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , "w" ):
pass
shutil.move(
F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , )
shutil.move(
F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")
            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)
replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' )
        os.rmdir(directory)
| 525
| 0
|
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to its integer value."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
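# e.g. parse_roman_numerals("MCMXC") == 1990: the subtractive pairs CM and XC
# contribute -100 + 1000 and -10 + 100 respectively.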
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal-form Roman numeral for an integer."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
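# e.g. generate_roman_numerals(1990) == "MCMXC", the minimal form of the same value.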
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Count the characters saved by rewriting each numeral in the input file in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        value = parse_roman_numerals(original)
        minimal = generate_roman_numerals(value)
        savings += len(original) - len(minimal)
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 707
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
# add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 8
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
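# e.g. to_atuple(224) -> (224, 224), while to_atuple((224, 224)) is returned unchanged;
# the tests below use it to normalize image and patch sizes.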
@require_flax
class _A :
def a ( self : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any ):
"""simple docstring"""
pass
def a ( self : Dict ):
"""simple docstring"""
pass
def a ( self : Optional[Any] ):
"""simple docstring"""
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def a ( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Dict=None , **lowerCamelCase__ : str ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase__ )
__UpperCamelCase : Tuple = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def a ( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple=None , **lowerCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : List[Any] = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model}
__UpperCamelCase : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__ )
__UpperCamelCase : List[Any] = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def a ( self : Dict , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Tuple=None , **lowerCamelCase__ : Tuple ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : List[str] = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model}
__UpperCamelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__ )
__UpperCamelCase : Dict = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__UpperCamelCase : List[str] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ )
__UpperCamelCase : List[str] = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__UpperCamelCase : List[Any] = after_output[0]
__UpperCamelCase : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-3 )
def a ( self : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple=None , **lowerCamelCase__ : Dict ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : int = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Any = {"""vision_model""": vision_model, """text_model""": text_model}
__UpperCamelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__ )
__UpperCamelCase : Optional[int] = model(
input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_attentions=lowerCamelCase__ )
__UpperCamelCase : Any = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase : Tuple = to_atuple(vision_model.config.image_size )
__UpperCamelCase : Dict = to_atuple(vision_model.config.patch_size )
__UpperCamelCase : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__UpperCamelCase : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__UpperCamelCase : int = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def a ( self : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any ):
"""simple docstring"""
pt_model.to(lowerCamelCase__ )
pt_model.eval()
# prepare inputs
__UpperCamelCase : Optional[int] = inputs_dict
__UpperCamelCase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__UpperCamelCase : Any = pt_model(**lowerCamelCase__ ).to_tuple()
__UpperCamelCase : str = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__UpperCamelCase : int = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : Any = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
pt_model_loaded.to(lowerCamelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
__UpperCamelCase : Optional[Any] = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase__ , pt_output_loaded.numpy() , 4e-2 )
def a ( self : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ):
"""simple docstring"""
__UpperCamelCase : str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[Any] = VisionTextDualEncoderModel(lowerCamelCase__ )
__UpperCamelCase : int = FlaxVisionTextDualEncoderModel(lowerCamelCase__ )
__UpperCamelCase : Dict = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
__UpperCamelCase : int = fx_state
self.check_pt_flax_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( self : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase : str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Tuple = VisionTextDualEncoderModel(lowerCamelCase__ )
__UpperCamelCase : List[Any] = FlaxVisionTextDualEncoderModel(lowerCamelCase__ )
__UpperCamelCase : str = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def a ( self : Optional[int] ):
"""simple docstring"""
__UpperCamelCase : List[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase__ )
def a ( self : int ):
"""simple docstring"""
__UpperCamelCase : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase__ )
def a ( self : Dict ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase__ )
def a ( self : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase__ )
@is_pt_flax_cross_test
def a ( self : str ):
"""simple docstring"""
__UpperCamelCase : Any = self.prepare_config_and_inputs()
__UpperCamelCase : Tuple = config_inputs_dict.pop("""vision_config""" )
__UpperCamelCase : Optional[Any] = config_inputs_dict.pop("""text_config""" )
__UpperCamelCase : int = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.check_equivalence_flax_to_pt(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def a ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Any = self.get_pretrained_model_and_inputs()
__UpperCamelCase : int = model_a(**lowerCamelCase__ )
__UpperCamelCase : str = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ )
__UpperCamelCase : Tuple = model_a(**lowerCamelCase__ )
__UpperCamelCase : Any = after_outputs[0]
__UpperCamelCase : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
@require_flax
class _A ( UpperCAmelCase_ , unittest.TestCase ):
def a ( self : Any ):
"""simple docstring"""
__UpperCamelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowerCamelCase__ , text_from_pt=lowerCamelCase__ , )
__UpperCamelCase : str = 13
__UpperCamelCase : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__UpperCamelCase : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__UpperCamelCase : Tuple = random_attention_mask([batch_size, 4] )
__UpperCamelCase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def a ( self : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = FlaxViTModel(lowerCamelCase__ )
__UpperCamelCase : int = FlaxBertModel(lowerCamelCase__ )
return vision_model, text_model
def a ( self : Optional[int] ):
"""simple docstring"""
__UpperCamelCase : int = FlaxViTModelTester(self )
__UpperCamelCase : Tuple = FlaxBertModelTester(self )
__UpperCamelCase : List[str] = vit_model_tester.prepare_config_and_inputs()
__UpperCamelCase : int = bert_model_tester.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = vision_config_and_inputs
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _A ( UpperCAmelCase_ , unittest.TestCase ):
def a ( self : Dict ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowerCamelCase__ , text_from_pt=lowerCamelCase__ , )
__UpperCamelCase : Any = 13
__UpperCamelCase : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__UpperCamelCase : Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__UpperCamelCase : List[str] = random_attention_mask([batch_size, 4] )
__UpperCamelCase : Optional[Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def a ( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase : Tuple = FlaxCLIPVisionModel(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] = FlaxBertModel(lowerCamelCase__ )
return vision_model, text_model
def a ( self : Any ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = FlaxCLIPVisionModelTester(self )
__UpperCamelCase : Optional[Any] = FlaxBertModelTester(self )
__UpperCamelCase : Any = clip_model_tester.prepare_config_and_inputs()
__UpperCamelCase : Any = bert_model_tester.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase : Optional[int] = vision_config_and_inputs
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Dict = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _A ( unittest.TestCase ):
@slow
def a ( self : int ):
"""simple docstring"""
__UpperCamelCase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
__UpperCamelCase : Tuple = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__UpperCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCamelCase : List[Any] = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="""np""" )
__UpperCamelCase : Optional[int] = model(**lowerCamelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__UpperCamelCase : Tuple = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCamelCase__ , atol=1e-3 ) )
| 269
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite `<name>.<digits>` segments as `<name>_<digits>` to match Flax naming."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
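# e.g. rename_key("encoder.layers.0.proj") -> "encoder.layers_0.proj".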
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
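# e.g. a PyTorch conv weight of shape (out_ch, in_ch, kh, kw) comes out as a Flax
# kernel of shape (kh, kw, in_ch, out_ch) thanks to the (2, 3, 1, 0) transpose above.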
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
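# Minimal usage sketch (illustrative; assumes `pt_model` is a torch.nn.Module whose
# Flax counterpart `flax_model` exposes `init_weights`, as required above):
#
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)
#   # `flax_params` is a nested dict ready to be passed as the Flax model's params.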
| 269
| 1
|
'''simple docstring'''
_lowercase = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
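# Illustrative use of the table above (not part of the upstream tooling): the pinned
# specifiers can be dumped straight into a requirements file.
#
#   with open("requirements.txt", "w") as f:
#       f.write("\n".join(sorted(_lowercase.values())))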
| 715
|
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class a_ ( UpperCAmelCase__ ):
@require_cuda
def lowercase__ ( self : List[str] ):
__snake_case = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
def lowercase__ ( self : Optional[int] ):
__snake_case = Accelerator()
__snake_case = GradientState()
assert state.num_steps == 1
__snake_case = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case = False
assert state.sync_gradients is False
GradientState._reset_state()
def lowercase__ ( self : Optional[Any] ):
__snake_case = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def lowercase__ ( self : Optional[int] ):
__snake_case = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def lowercase__ ( self : Union[str, Any] ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__lowerCAmelCase : List[str] , **__lowerCAmelCase : Any ):
pass
with patch('torch.cuda.set_device' , __lowerCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64' ):
__snake_case = Accelerator()
self.assertEqual(str(accelerator.state.device ) , 'cuda:64' )
def lowercase__ ( self : List[str] ):
__snake_case = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1E-3)
            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1E-3)
def lowercase__ ( self : List[str] ):
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__snake_case = get_signature(__lowerCAmelCase )
# saving hook
def save_config(__lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str ):
__snake_case = {'class_name': models[0].__class__.__name__}
with open(os.path.join(__lowerCAmelCase , 'data.json' ) , 'w' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# loading hook
def load_config(__lowerCAmelCase : int , __lowerCAmelCase : str ):
with open(os.path.join(__lowerCAmelCase , 'data.json' ) , 'r' ) as f:
__snake_case = json.load(__lowerCAmelCase )
__snake_case = config['class_name']
__snake_case = accelerator.register_save_state_pre_hook(__lowerCAmelCase )
__snake_case = accelerator.register_load_state_pre_hook(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCAmelCase )
# make sure random weights don't match with hooks
load_random_weights(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case = 'random'
# make sure loaded weights match with hooks
accelerator.load_state(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCAmelCase )
# make sure random weights don't match with hooks removed
load_random_weights(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case = 'random'
# make sure loaded weights match with hooks removed
accelerator.load_state(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def lowercase__ ( self : Union[str, Any] ):
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
__snake_case = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertTrue(dummy_obj is None )
def lowercase__ ( self : List[str] ):
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
__snake_case = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Dummy object should have `_is_accelerate_prepared` set to `True`' , )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Model is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Optimizer is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Scheduler is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
@slow
@require_bnb
def lowercase__ ( self : Optional[int] ):
from transformers import AutoModelForCausalLM
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=__lowerCAmelCase , device_map={'': 0} , )
__snake_case = Accelerator()
# This should work
__snake_case = accelerator.prepare(__lowerCAmelCase )
@slow
@require_bnb
def lowercase__ ( self : List[Any] ):
from transformers import AutoModelForCausalLM
__snake_case = Accelerator()
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
__snake_case = infer_auto_device_map(__lowerCAmelCase )
__snake_case = 'cpu'
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , device_map=__lowerCAmelCase , load_in_abit=__lowerCAmelCase , llm_inta_enable_fpaa_cpu_offload=__lowerCAmelCase )
# This should not work and get value error
with self.assertRaises(__lowerCAmelCase ):
__snake_case = accelerator.prepare(__lowerCAmelCase )
@slow
@require_bnb
@require_multi_gpu
def lowercase__ ( self : Any ):
from transformers import AutoModelForCausalLM
__snake_case = {'distributed_type': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
__snake_case = infer_auto_device_map(__lowerCAmelCase )
__snake_case = 1
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=__lowerCAmelCase , device_map=__lowerCAmelCase , )
__snake_case = Accelerator()
# This should not work and get value error
with self.assertRaises(__lowerCAmelCase ):
__snake_case = accelerator.prepare(__lowerCAmelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def lowercase__ ( self : Any ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
__snake_case = infer_auto_device_map(__lowerCAmelCase )
__snake_case = 1
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=__lowerCAmelCase , device_map=__lowerCAmelCase , )
__snake_case = Accelerator()
# This should work
__snake_case = accelerator.prepare(__lowerCAmelCase )
@require_cuda
def lowercase__ ( self : str ):
__snake_case = torch.nn.Linear(1_0 , 1_0 )
__snake_case = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case = Accelerator(cpu=__lowerCAmelCase )
__snake_case = accelerator.prepare(__lowerCAmelCase )
| 427
| 0
|
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """6k +/- 1 trial-division primality check."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
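# e.g. is_prime(29) is True (29 = 6*5 - 1), while is_prime(25) is False because the
# loop tests the candidate divisor 5.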
def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F"{solution() = }")
| 76
|
"""simple docstring"""
def A ( txt: str ) -> list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 545
| 0
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class PoolFormerFeatureExtractor( PoolFormerImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> List[Any]:
        '''simple docstring'''
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 716
|
import os
def solution():
    '''simple docstring'''
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_directory , """triangle.txt""" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(""" """ ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_one = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_two = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_one , number_two )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
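The bottom-up accumulation in solution() can be verified by hand on a tiny inline triangle (a sketch reusing the loop above instead of reading triangle.txt):
a = [[3], [7, 4], [2, 4, 6]]
for i in range(1, len(a)):
    for j in range(len(a[i])):
        number_one = a[i - 1][j] if j != len(a[i - 1]) else 0
        number_two = a[i - 1][j - 1] if j > 0 else 0
        a[i][j] += max(number_one, number_two)
assert max(a[-1]) == 14  # best path is 3 -> 7 -> 4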
| 291
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Tuple = logging.get_logger(__name__)
lowerCAmelCase_ : Union[str, Any] = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    model_type = '''sew-d'''
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1e-7 , feature_layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. """
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """
                F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
                F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
def lowercase_ ( self : str ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
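The property above multiplies the convolutional strides together, i.e. the number of raw input samples that map to one encoder output frame. A standalone sketch of the same computation, using the default stride tuple from the signature above:
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 samples per frame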
| 442
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
"""simple docstring"""
config = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
    config.hidden_size = 1024
    config.intermediate_size = 4096
    config.num_hidden_layers = 24
    config.num_attention_heads = 16
    config.backbone_out_indices = [5, 11, 17, 23]
    config.neck_hidden_sizes = [256, 512, 1024, 1024]
    expected_shape = (1, 384, 384)
if "nyu" in checkpoint_url or "midas" in checkpoint_url:
    config.hidden_size = 768
    config.reassemble_factors = [1, 1, 1, 0.5]
    config.neck_hidden_sizes = [256, 512, 768, 768]
    config.num_labels = 150
    config.patch_size = 16
    expected_shape = (1, 384, 384)
    config.use_batch_norm_in_fusion_residual = False
    config.readout_type = """project"""
if "ade" in checkpoint_url:
    config.use_batch_norm_in_fusion_residual = True
    config.hidden_size = 768
    config.reassemble_factors = [1, 1, 1, 0.5]
    config.num_labels = 150
    config.patch_size = 16
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    expected_shape = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_( state_dict ):
    """simple docstring"""
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
a_ : List[str] = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
a_ : Union[str, Any] = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
a_ : Optional[int] = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
a_ : List[str] = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
a_ : Any = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
a_ : Dict = name.replace("""proj""" , """projection""" )
if "blocks" in name:
a_ : Union[str, Any] = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
a_ : Optional[int] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
a_ : Any = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
a_ : List[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
a_ : int = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
a_ : Tuple = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
a_ : str = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
a_ : Optional[Any] = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
a_ : Dict = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
a_ : Tuple = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
a_ : str = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
a_ : List[Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
a_ : List[str] = name.replace(F"refinenet{layer_idx}" , F"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
a_ : Union[str, Any] = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
a_ : Any = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
a_ : List[str] = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
a_ : str = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
a_ : Dict = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
a_ : str = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
a_ : int = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
a_ : Optional[Any] = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
a_ : str = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
a_ : Optional[int] = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
a_ : Optional[Any] = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
a_ : List[Any] = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
a_ : Optional[Any] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
a_ : int = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
a_ : Any = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
a_ : int = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
a_ : Any = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
a_ : Union[str, Any] = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
a_ : Any = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
a_ : Any = name.replace("""..""" , """.""" )
if "stem.conv" in name:
a_ : Optional[int] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
a_ : Dict = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
a_ : str = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
a_ : Any = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
a_ : Dict = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
a_ : List[str] = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
a_ : int = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def read_in_q_k_v( state_dict , config ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" )
in_proj_bias = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
state_dict[F"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
state_dict[F"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
    config.hidden_size : config.hidden_size * 2, :
]
state_dict[F"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
    config.hidden_size : config.hidden_size * 2
]
state_dict[F"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
    -config.hidden_size :, :
]
state_dict[F"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img( ):
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    """simple docstring"""
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if """ade""" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if """ade""" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="""pt""" )
    # forward pass
    outputs = model(**encoding ).logits if """ade""" in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
        image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
lowerCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 442
| 1
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( SchedulerCommonTest ):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 25),)
def get_scheduler_config( self , **kwargs ):
    """simple docstring"""
    config = {
        '''num_train_timesteps''': 1_0_0_0,
        '''beta_start''': 0.0001,
        '''beta_end''': 0.02,
        '''beta_schedule''': '''linear''',
        '''solver_order''': 2,
        '''solver_type''': '''bh2''',
    }
    config.update(**kwargs )
    return config
def check_over_configs( self , time_step=0 , **config ):
    """simple docstring"""
    kwargs = dict(self.forward_default_kwargs )
    num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
    sample = self.dummy_sample
    residual = 0.1 * sample
    dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
    for scheduler_class in self.scheduler_classes:
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(num_inference_steps )
        # copy over dummy past residuals
        scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
        with tempfile.TemporaryDirectory() as tmpdirname:
            scheduler.save_config(tmpdirname )
            new_scheduler = scheduler_class.from_pretrained(tmpdirname )
            new_scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
        output , new_output = sample, sample
        for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
            output = scheduler.step(residual , t , output , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
        assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def check_over_forward( self , time_step=0 , **kwargs ):
    """simple docstring"""
    kwargs = dict(self.forward_default_kwargs )
    num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
    sample = self.dummy_sample
    residual = 0.1 * sample
    dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
    for scheduler_class in self.scheduler_classes:
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(num_inference_steps )
        # copy over dummy past residuals (must be after setting timesteps)
        scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
        with tempfile.TemporaryDirectory() as tmpdirname:
            scheduler.save_config(tmpdirname )
            new_scheduler = scheduler_class.from_pretrained(tmpdirname )
            # copy over dummy past residuals
            new_scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residual (must be after setting timesteps)
            new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
        output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
        new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
        assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def full_loop( self , scheduler=None , **config ):
    """simple docstring"""
    if scheduler is None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
    num_inference_steps = 1_0
    model = self.dummy_model()
    sample = self.dummy_sample_deter
    scheduler.set_timesteps(num_inference_steps )
    for i, t in enumerate(scheduler.timesteps ):
        residual = model(sample , t )
        sample = scheduler.step(residual , t , sample ).prev_sample
    return sample
def A ( self ):
"""simple docstring"""
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
for scheduler_class in self.scheduler_classes:
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config )
    sample = self.dummy_sample
    residual = 0.1 * sample
    if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
        scheduler.set_timesteps(num_inference_steps )
    elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
        scheduler.num_inference_steps = num_inference_steps
    # copy over dummy past residuals (must be done after set_timesteps)
    dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
    scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
    time_step_0 = scheduler.timesteps[5]
    time_step_1 = scheduler.timesteps[6]
    output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
    output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
    self.assertEqual(output_0.shape , sample.shape )
    self.assertEqual(output_0.shape , output_1.shape )
def A ( self ):
"""simple docstring"""
scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
scheduler = DEISMultistepScheduler.from_config(scheduler.config )
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def A ( self ):
"""simple docstring"""
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
    self.check_over_configs(num_train_timesteps=timesteps )
def A ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
    for solver_type in ["bh1", "bh2"]:
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
def A ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
    self.check_over_configs(prediction_type=prediction_type )
def A ( self ):
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
    solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
sample = self.full_loop(
    solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def A ( self ):
"""simple docstring"""
self.check_over_configs(lower_order_final=True )
self.check_over_configs(lower_order_final=False )
def A ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
    self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def A ( self ):
"""simple docstring"""
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def A ( self ):
"""simple docstring"""
sample = self.full_loop(prediction_type='''v_prediction''' )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def A ( self ):
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 1_0
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
    residual = model(sample , t )
    sample = scheduler.step(residual , t , sample ).prev_sample
assert sample.dtype == torch.float16
def A ( self , **config ):
    """simple docstring"""
    for scheduler_class in self.scheduler_classes:
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(scheduler.config.num_train_timesteps )
        assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 711
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
@property
def A ( self ):
"""simple docstring"""
torch.manual_seed(0 )
model = UNet2DModel(
    block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def A ( self ):
"""simple docstring"""
torch.manual_seed(0 )
model = VQModel(
    block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def A ( self ):
"""simple docstring"""
torch.manual_seed(0 )
config = CLIPTextConfig(
    bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(config )
def A ( self ):
"""simple docstring"""
unet = self.dummy_uncond_unet
scheduler = DDIMScheduler()
vae = self.dummy_vq_model
ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
ldm.to(torch_device )
ldm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = ldm(generator=generator , num_inference_steps=2 , output_type='''numpy''' ).images
generator = torch.manual_seed(0 )
image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type='''numpy''' , return_dict=False )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
tolerance = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
ldm = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(torch_device )
ldm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = ldm(generator=generator , num_inference_steps=5 , output_type='''numpy''' ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
tolerance = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 180
| 0
|
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowercase_ , lowercase_ , lowercase_ = False, False, False
@dataclass
class __a :
sampling_rate: Optional[int] = None
mono: bool = True
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
_type: str = field(default='Audio' , init=False , repr=False )
def __call__( self ):
'''simple docstring'''
return self.pa_type
def encode_example( self , value ):
'''simple docstring'''
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
if isinstance(value , str ):
    return {"bytes": None, "path": value}
elif isinstance(value , bytes ):
    return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
buffer = BytesIO()
sf.write(buffer , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
if value.get('''bytes''' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
bytes_value = np.frombuffer(value['''bytes'''] , dtype=np.int16 ).astype(np.float32 ) / 32767
else:
    bytes_value = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.float32 ) / 32767
buffer = BytesIO(bytes() )
sf.write(buffer , bytes_value , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def decode_example( self , value , token_per_repo_id = None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
path , file = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
if file is None:
token_per_repo_id = token_per_repo_id or {}
source_url = path.split('''::''' )[-1]
try:
    repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )['''repo_id''']
    use_auth_token = token_per_repo_id[repo_id]
except (ValueError, KeyError):
    use_auth_token = None
with xopen(path , '''rb''' , use_auth_token=use_auth_token ) as f:
    array , sampling_rate = sf.read(f )
else:
    array , sampling_rate = sf.read(file )
array = array.T
if self.mono:
    array = librosa.to_mono(array )
if self.sampling_rate and self.sampling_rate != sampling_rate:
    array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate )
    sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def flatten( self ):
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def cast_storage( self , storage ):
'''simple docstring'''
if pa.types.is_string(storage.type ):
    bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
    storage = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
    path_array = pa.array([None] * len(storage ) , type=pa.string() )
    storage = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
    storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
    if storage.type.get_field_index('''bytes''' ) >= 0:
        bytes_array = storage.field('''bytes''' )
    else:
        bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
    if storage.type.get_field_index('''path''' ) >= 0:
        path_array = storage.field('''path''' )
    else:
        path_array = pa.array([None] * len(storage ) , type=pa.string() )
    storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
return array_cast(storage , self.pa_type )
def embed_storage( self , storage ):
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(path ):
    with xopen(path , '''rb''' ) as f:
        bytes_ = f.read()
    return bytes_
bytes_array = pa.array(
    [
        (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
        for x in storage.to_pylist()
    ] , type=pa.binary() , )
path_array = pa.array(
    [os.path.basename(path ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(storage , self.pa_type )
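A usage sketch of this feature through the public datasets API (an assumption: the class is the one exposed as datasets.Audio, and "sample.wav" is a hypothetical local file):
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["sample.wav"]})  # "sample.wav" is a placeholder path
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
example = ds[0]["audio"]  # decoded on access into {"path", "array", "sampling_rate"}
print(example["sampling_rate"])  # 16000 after resampling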
| 552
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
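With the _LazyModule in place, the heavy submodules are imported only on first attribute access; a typical consumer does (a sketch, assuming this file is the transformers MobileBERT package __init__):
from transformers import MobileBertConfig, MobileBertModel  # resolved lazily

config = MobileBertConfig()
model = MobileBertModel(config)  # randomly initialised; no download needed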
| 552
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( PretrainedConfig ):
    model_type = """xglm"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , vocab_size=25_60_08 , max_position_embeddings=20_48 , d_model=10_24 , ffn_dim=40_96 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.02 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) ->Union[str, Any]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 210
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class snake_case ( PretrainedConfig ):
    model_type = """realm"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , retriever_proj_size=1_28 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=30_72 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=2_56 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=3_20 , num_block_records=13_35_37_18 , searcher_beam_size=50_00 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) ->Tuple:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 210
| 1
|